-
Notifications
You must be signed in to change notification settings - Fork 25.6k
Fold EngineDiskUtils into Store, for better lock semantics #29156
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 4 commits
61264de
5e6746b
0bcfa41
5ebca23
f9294c3
a91fffd
fb56e9e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
This file was deleted.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -31,6 +31,8 @@ | |
| import org.apache.lucene.index.IndexFormatTooOldException; | ||
| import org.apache.lucene.index.IndexNotFoundException; | ||
| import org.apache.lucene.index.IndexWriter; | ||
| import org.apache.lucene.index.IndexWriterConfig; | ||
| import org.apache.lucene.index.NoMergePolicy; | ||
| import org.apache.lucene.index.SegmentCommitInfo; | ||
| import org.apache.lucene.index.SegmentInfos; | ||
| import org.apache.lucene.store.AlreadyClosedException; | ||
|
|
@@ -47,7 +49,6 @@ | |
| import org.apache.lucene.util.ArrayUtil; | ||
| import org.apache.lucene.util.BytesRef; | ||
| import org.apache.lucene.util.BytesRefBuilder; | ||
| import org.elasticsearch.core.internal.io.IOUtils; | ||
| import org.apache.lucene.util.Version; | ||
| import org.elasticsearch.ElasticsearchException; | ||
| import org.elasticsearch.ExceptionsHelper; | ||
|
|
@@ -70,11 +71,13 @@ | |
| import org.elasticsearch.common.util.concurrent.AbstractRefCounted; | ||
| import org.elasticsearch.common.util.concurrent.RefCounted; | ||
| import org.elasticsearch.common.util.iterable.Iterables; | ||
| import org.elasticsearch.core.internal.io.IOUtils; | ||
| import org.elasticsearch.env.NodeEnvironment; | ||
| import org.elasticsearch.env.ShardLock; | ||
| import org.elasticsearch.env.ShardLockObtainFailedException; | ||
| import org.elasticsearch.index.IndexSettings; | ||
| import org.elasticsearch.index.engine.Engine; | ||
| import org.elasticsearch.index.engine.InternalEngine; | ||
| import org.elasticsearch.index.seqno.SequenceNumbers; | ||
| import org.elasticsearch.index.shard.AbstractIndexShardComponent; | ||
| import org.elasticsearch.index.shard.IndexShard; | ||
|
|
@@ -156,7 +159,8 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire | |
| this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY); | ||
| } | ||
|
|
||
| public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { | ||
| public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, | ||
| OnClose onClose) throws IOException { | ||
| super(shardId, indexSettings); | ||
| final Settings settings = indexSettings.getSettings(); | ||
| this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); | ||
|
|
@@ -1455,4 +1459,102 @@ private static long estimateSize(Directory directory) throws IOException { | |
| } | ||
| } | ||
|
|
||
| /** | ||
| * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. | ||
| */ | ||
| public void createEmpty() throws IOException { | ||
| metadataLock.writeLock().lock(); | ||
| try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory)) { | ||
| final Map<String, String> map = new HashMap<>(); | ||
| map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); | ||
| map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); | ||
| map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); | ||
| map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); | ||
| updateCommitData(writer, map); | ||
| } finally { | ||
| metadataLock.writeLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
|
|
||
| /** | ||
| * Marks an existing lucene index with a new history uuid. | ||
| * This is used to make sure no existing shard will recover from this index using ops-based recovery. | ||
| */ | ||
| public void bootstrapNewHistory() | ||
| throws IOException { | ||
|
||
| metadataLock.writeLock().lock(); | ||
| try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { | ||
| final Map<String, String> userData = getUserData(writer); | ||
| final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); | ||
| final Map<String, String> map = new HashMap<>(); | ||
| map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); | ||
| map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); | ||
| updateCommitData(writer, map); | ||
| } finally { | ||
| metadataLock.writeLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Force bakes the given translog generation as recovery information in the lucene index. This is | ||
| * used when recovering from a snapshot or peer file based recovery where a new empty translog is | ||
| * created and the existing lucene index needs to be changed to use it. | ||
| */ | ||
| public void associateIndexWithNewTranslog(final String translogUUID) | ||
|
||
| throws IOException { | ||
|
||
| metadataLock.writeLock().lock(); | ||
| try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { | ||
| if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) { | ||
| throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]"); | ||
| } | ||
| final Map<String, String> map = new HashMap<>(); | ||
| map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); | ||
| map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); | ||
| updateCommitData(writer, map); | ||
| } finally { | ||
| metadataLock.writeLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
|
|
||
| /** | ||
| * Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed. | ||
| */ | ||
| public void ensureIndexHasHistoryUUID() throws IOException { | ||
| metadataLock.writeLock().lock(); | ||
| try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { | ||
| final Map<String, String> userData = getUserData(writer); | ||
| if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { | ||
| updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID())); | ||
| } | ||
| } finally { | ||
| metadataLock.writeLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
| private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException { | ||
| final Map<String, String> userData = getUserData(writer); | ||
| userData.putAll(keysToUpdate); | ||
| writer.setLiveCommitData(userData.entrySet()); | ||
| writer.commit(); | ||
| } | ||
|
|
||
| private Map<String, String> getUserData(IndexWriter writer) { | ||
| final Map<String, String> userData = new HashMap<>(); | ||
| writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); | ||
| return userData; | ||
| } | ||
|
|
||
| private IndexWriter newIndexWriter(IndexWriterConfig.OpenMode openMode, final Directory dir) throws IOException { | ||
| IndexWriterConfig iwc = new IndexWriterConfig(null) | ||
| .setCommitOnClose(false) | ||
| // we don't want merges to happen here - we call maybe merge on the engine | ||
| // later once we started it up; otherwise we would need to wait for it here | ||
| // we also don't specify a codec here and merges should use the engines for this index | ||
| .setMergePolicy(NoMergePolicy.INSTANCE) | ||
| .setOpenMode(openMode); | ||
| return new IndexWriter(dir, iwc); | ||
| } | ||
|
|
||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
hmm why did this change?
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To create an empty shard, we now:
Store.createEmpty, then Store.associateIndexWithNewTranslog. This creates one more commit compared to how it used to be. I can change createEmpty to require a translogUUID as a parameter, if you prefer. I'm OK either way.