From 0e164f6d3577abfeb3f93bcb65248c2871d78a8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Tue, 5 Oct 2021 11:41:28 +0200 Subject: [PATCH] Respect generational files in recoveryDiff Today `MetadataSnapshot#recoveryDiff` considers the `.liv` file as per-commit rather than per-segment and often transfers them during peer recoveries and snapshot restores. It also considers differences in `.fnm`, `.dvd` and `.dvm` files as indicating a difference in the whole segment, even though these files may be adjusted without changing the segment itself. This commit adjusts this logic to attach these generational files to the segments themselves, allowing Elasticsearch only to transfer them if they are genuinely needed. Closes #55142 Backport of #77695 Co-authored-by: David Turner --- .../BlobStoreIndexShardSnapshot.java | 25 ++- .../org/elasticsearch/index/store/Store.java | 165 +++++++++++------- .../index/store/StoreFileMetadata.java | 79 ++++++++- .../blobstore/BlobStoreRepository.java | 33 +++- .../blobstore/ChecksumBlobStoreFormat.java | 39 ++++- .../snapshots/SnapshotsService.java | 7 + .../snapshots/blobstore/FileInfoTests.java | 37 +++- .../elasticsearch/index/store/StoreTests.java | 134 +++++++++----- .../SnapshotsRecoveryPlannerServiceTests.java | 2 +- 9 files changed, 387 insertions(+), 134 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 337e494433c82..1d7081bdcc9b8 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -28,6 +28,8 @@ import java.util.Objects; import java.util.stream.IntStream; +import static org.elasticsearch.index.store.StoreFileMetadata.UNAVAILABLE_WRITER_UUID; + /** * Shard snapshot metadata */ @@ -37,6 +39,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment { * Information about snapshotted file */ public static class FileInfo implements Writeable { + public static final String SERIALIZE_WRITER_UUID = "serialize_writer_uuid"; private final String name; private final ByteSizeValue partSize; @@ -239,6 +242,7 @@ public boolean isSame(FileInfo fileInfo) { static final String PART_SIZE = "part_size"; static final String WRITTEN_BY = "written_by"; static final String META_HASH = "meta_hash"; + static final String WRITER_UUID = "writer_uuid"; /** * Serializes file info into JSON @@ -261,10 +265,19 @@ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent builder.field(WRITTEN_BY, file.metadata.writtenBy()); } - if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - BytesRef br = file.metadata.hash(); - builder.field(META_HASH, br.bytes, br.offset, br.length); + final BytesRef hash = file.metadata.hash(); + if (hash != null && hash.length > 0) { + builder.field(META_HASH, hash.bytes, hash.offset, hash.length); + } + + final BytesRef writerUuid = file.metadata.writerUuid(); + // We serialize by default when SERIALIZE_WRITER_UUID is not present since in deletes/clones + // we read the serialized files from the blob store and we enforce the version invariants when + // the snapshot was done + if (writerUuid.length > 0 && params.paramAsBoolean(SERIALIZE_WRITER_UUID, true)) { + builder.field(WRITER_UUID, writerUuid.bytes, 
writerUuid.offset, writerUuid.length); } + builder.endObject(); } @@ -283,6 +296,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { ByteSizeValue partSize = null; String writtenBy = null; BytesRef metaHash = new BytesRef(); + BytesRef writerUuid = UNAVAILABLE_WRITER_UUID; XContentParserUtils.ensureExpectedToken(token, XContentParser.Token.START_OBJECT, parser); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -305,6 +319,9 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { metaHash.bytes = parser.binaryValue(); metaHash.offset = 0; metaHash.length = metaHash.bytes.length; + } else if (WRITER_UUID.equals(currentFieldName)) { + writerUuid = new BytesRef(parser.binaryValue()); + assert writerUuid.length > 0; } else { XContentParserUtils.throwUnknownField(currentFieldName, parser.getTokenLocation()); } @@ -328,7 +345,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } else if (checksum == null) { throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); } - return new FileInfo(name, new StoreFileMetadata(physicalName, length, checksum, writtenBy, metaHash), partSize); + return new FileInfo(name, new StoreFileMetadata(physicalName, length, checksum, writtenBy, metaHash, writerUuid), partSize); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index a837b08997451..698e956b1f335 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -52,10 +52,10 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; -import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -83,13 +83,14 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.LongUnaryOperator; +import java.util.function.Predicate; import java.util.zip.CRC32; import java.util.zip.Checksum; @@ -850,16 +851,22 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg if (version.onOrAfter(maxVersion)) { maxVersion = version; } + + final BytesRef segmentInfoId = StoreFileMetadata.toWriterUuid(info.info.getId()); + final BytesRef segmentCommitInfoId = StoreFileMetadata.toWriterUuid(info.getId()); + for (String file : info.files()) { checksumFromLuceneFile(directory, file, builder, logger, version.toString(), - SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); + SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)), + IndexFileNames.parseGeneration(file) == 0 ? 
segmentInfoId : segmentCommitInfoId); } } if (maxVersion == null) { maxVersion = org.elasticsearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } final String segmentsFile = segmentCommitInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion.toString(), true); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion.toString(), true, + StoreFileMetadata.toWriterUuid(segmentCommitInfos.getId())); } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we either know the index is corrupted or it's just not there throw ex; @@ -885,7 +892,7 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } private static void checksumFromLuceneFile(Directory directory, String file, Map builder, - Logger logger, String version, boolean readFileAsHash) throws IOException { + Logger logger, String version, boolean readFileAsHash, BytesRef writerUuid) throws IOException { final String checksum; final BytesRefBuilder fileHash = new BytesRefBuilder(); try (IndexInput in = directory.openInput(file, READONCE_CHECKSUM)) { @@ -910,7 +917,7 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map logger.debug(() -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); throw ex; } - builder.put(file, new StoreFileMetadata(file, length, checksum, version, fileHash.get())); + builder.put(file, new StoreFileMetadata(file, length, checksum, version, fileHash.get(), writerUuid)); } } @@ -939,8 +946,6 @@ public Map asMap() { return metadata; } - private static final String DEL_FILE_EXTENSION = "del"; // legacy delete file - private static final String LIV_FILE_EXTENSION = "liv"; // lucene 5 delete file private static final String SEGMENT_INFO_EXTENSION = "si"; /** @@ -951,80 +956,107 @@ public Map asMap() { *
  • different: they exist in both snapshots but their they are not identical
  • missing: files that exist in the source but not in the target
      *
-     * This method groups file into per-segment files and per-commit files. A file is treated as
-     * identical if and on if all files in it's group are identical. On a per-segment level files for a segment are treated
-     * as identical iff:
-     *   • all files in this segment have the same checksum
-     *   • all files in this segment have the same length
-     *   • the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function,
-     *     The metadata transfers the {@code .si} file content as it's hash
-     *
-     * The {@code .si} file contains a lot of diagnostics including a timestamp etc. in the future there might be
-     * unique segment identifiers in there hardening this method further.
+     * Individual files are compared by name, length, checksum and (if present) a UUID that was assigned when the file was originally
+     * written. The segment info ({@code *.si}) files and the segments file ({@code segments_N}) are also checked to be a byte-for-byte
+     * match.
      *
-     * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files
-     * like deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
-     * as identical iff:
-     *   • all files belonging to this commit have the same checksum
-     *   • all files belonging to this commit have the same length
-     *   • the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function,
-     *     The metadata transfers the {@code segments_N} file content as it's hash
+     * Files are collected together into a group for each segment plus one group of "per-commit" ({@code segments_N}) files. Each
+     * per-segment group is subdivided into a nongenerational group (most of them) and a generational group (e.g. {@code *.liv},
+     * {@code *.fnm}, {@code *.dvm}, {@code *.dvd} that have been updated by subsequent commits).
      *
    - * NOTE: this diff will not contain the {@code segments.gen} file. This file is omitted on recovery. + * For each segment, if any nongenerational files are different then the whole segment is considered to be different and will be + * recovered in full. If all the nongenerational files are the same but any generational files are different then all the + * generational files are considered to be different and will be recovered in full, but the nongenerational files are left alone. + * Finally, if any file is different then all the per-commit files are recovered too. */ - public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { + public RecoveryDiff recoveryDiff(final MetadataSnapshot targetSnapshot) { + final List perCommitSourceFiles = new ArrayList<>(); + final Map, List>> perSegmentSourceFiles = new HashMap<>(); + // per segment, a tuple of <> + + for (StoreFileMetadata sourceFile : this) { + if (sourceFile.name().startsWith("_")) { + final String segmentId = IndexFileNames.parseSegmentName(sourceFile.name()); + final boolean isGenerationalFile = IndexFileNames.parseGeneration(sourceFile.name()) > 0L; + final Tuple, List> perSegmentTuple = perSegmentSourceFiles + .computeIfAbsent(segmentId, k -> Tuple.tuple(new ArrayList<>(), new ArrayList<>())); + (isGenerationalFile ? perSegmentTuple.v2() : perSegmentTuple.v1()).add(sourceFile); + } else { + assert sourceFile.name().startsWith(IndexFileNames.SEGMENTS + "_") : "unexpected " + sourceFile; + perCommitSourceFiles.add(sourceFile); + } + } + final List identical = new ArrayList<>(); final List different = new ArrayList<>(); final List missing = new ArrayList<>(); - final Map> perSegment = new HashMap<>(); - final List perCommitStoreFiles = new ArrayList<>(); - for (StoreFileMetadata meta : this) { - if (IndexFileNames.OLD_SEGMENTS_GEN.equals(meta.name())) { // legacy - continue; // we don't need that file at all + final List tmpIdentical = new ArrayList<>(); // confirm whole group is identical before adding to 'identical' + final Predicate> groupComparer = sourceGroup -> { + assert tmpIdentical.isEmpty() : "not cleaned up: " + tmpIdentical; + boolean groupIdentical = true; + for (StoreFileMetadata sourceFile : sourceGroup) { + final StoreFileMetadata targetFile = targetSnapshot.get(sourceFile.name()); + if (targetFile == null) { + groupIdentical = false; + missing.add(sourceFile); + } else if (groupIdentical && targetFile.isSame(sourceFile)) { + tmpIdentical.add(sourceFile); + } else { + groupIdentical = false; + different.add(sourceFile); + } } - final String segmentId = IndexFileNames.parseSegmentName(meta.name()); - final String extension = IndexFileNames.getExtension(meta.name()); - if (IndexFileNames.SEGMENTS.equals(segmentId) || - DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) { - // only treat del files as per-commit files fnm files are generational but only for upgradable DV - perCommitStoreFiles.add(meta); + if (groupIdentical) { + identical.addAll(tmpIdentical); } else { - perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta); + different.addAll(tmpIdentical); } - } - final ArrayList identicalFiles = new ArrayList<>(); - for (List segmentFiles : Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles))) { - identicalFiles.clear(); - boolean consistent = true; - for (StoreFileMetadata meta : segmentFiles) { - StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); - if (storeFileMetadata == null) { - consistent = 
false; - missing.add(meta); - } else if (storeFileMetadata.isSame(meta) == false) { - consistent = false; - different.add(meta); + tmpIdentical.clear(); + return groupIdentical; + }; + final Consumer> allDifferent = sourceGroup -> { + for (StoreFileMetadata sourceFile : sourceGroup) { + final StoreFileMetadata targetFile = targetSnapshot.get(sourceFile.name()); + if (targetFile == null) { + missing.add(sourceFile); } else { - identicalFiles.add(meta); + different.add(sourceFile); } } - if (consistent) { - identical.addAll(identicalFiles); + }; + + boolean segmentsIdentical = true; + + for (Tuple, List> segmentFiles : perSegmentSourceFiles.values()) { + final List nonGenerationalFiles = segmentFiles.v1(); + final List generationalFiles = segmentFiles.v2(); + + if (groupComparer.test(nonGenerationalFiles)) { + // non-generational files are identical, now check the generational files + segmentsIdentical = groupComparer.test(generationalFiles) && segmentsIdentical; } else { - // make sure all files are added - this can happen if only the deletes are different - different.addAll(identicalFiles); + // non-generational files were different, so consider the whole segment as different + segmentsIdentical = false; + allDifferent.accept(generationalFiles); } } - RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), - Collections.unmodifiableList(different), Collections.unmodifiableList(missing)); - assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1 : 0) - : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + - this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]"; + + if (segmentsIdentical) { + // segments were the same, check the per-commit files + groupComparer.test(perCommitSourceFiles); + } else { + // at least one segment was different, so treat all the per-commit files as different too + allDifferent.accept(perCommitSourceFiles); + } + + final RecoveryDiff recoveryDiff = new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing)); + assert recoveryDiff.size() == metadata.size() : "some files are missing: recoveryDiff is [" + recoveryDiff + + "] comparing: [" + metadata + "] to [" + targetSnapshot.metadata + "]"; return recoveryDiff; } @@ -1128,7 +1160,6 @@ public String toString() { } } - /** * Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup. 
* This includes write lock files diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java index 01ce1e3fb055a..f1e157a617c7b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java @@ -9,12 +9,17 @@ package org.elasticsearch.index.store; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; +import org.elasticsearch.core.Nullable; import java.io.IOException; import java.text.ParseException; @@ -22,6 +27,9 @@ public class StoreFileMetadata implements Writeable { + public static final BytesRef UNAVAILABLE_WRITER_UUID = new BytesRef(); + private static final org.elasticsearch.Version WRITER_UUID_MIN_VERSION = org.elasticsearch.Version.V_7_16_0; + private final String name; // the actual file size on "disk", if compressed, the compressed size @@ -33,17 +41,22 @@ public class StoreFileMetadata implements Writeable { private final BytesRef hash; + private final BytesRef writerUuid; + public StoreFileMetadata(String name, long length, String checksum, String writtenBy) { - this(name, length, checksum, writtenBy, null); + this(name, length, checksum, writtenBy, null, UNAVAILABLE_WRITER_UUID); } - public StoreFileMetadata(String name, long length, String checksum, String writtenBy, BytesRef hash) { + public StoreFileMetadata(String name, long length, String checksum, String writtenBy, BytesRef hash, BytesRef writerUuid) { assert assertValidWrittenBy(writtenBy); this.name = Objects.requireNonNull(name, "name must not be null"); this.length = length; this.checksum = Objects.requireNonNull(checksum, "checksum must not be null"); this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null"); this.hash = hash == null ? new BytesRef() : hash; + + assert writerUuid != null && (writerUuid.length > 0 || writerUuid == UNAVAILABLE_WRITER_UUID); + this.writerUuid = Objects.requireNonNull(writerUuid, "writerUuid must not be null"); } /** @@ -55,6 +68,11 @@ public StoreFileMetadata(StreamInput in) throws IOException { checksum = in.readString(); writtenBy = in.readString(); hash = in.readBytesRef(); + if (in.getVersion().onOrAfter(WRITER_UUID_MIN_VERSION)) { + writerUuid = StoreFileMetadata.toWriterUuid(in.readBytesRef()); + } else { + writerUuid = UNAVAILABLE_WRITER_UUID; + } } @Override @@ -64,6 +82,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(checksum); out.writeString(writtenBy); out.writeBytesRef(hash); + if (out.getVersion().onOrAfter(WRITER_UUID_MIN_VERSION)) { + out.writeBytesRef(writerUuid); + } } /** @@ -119,6 +140,24 @@ public boolean isSame(StoreFileMetadata other) { // we can't tell if either or is null so we return false in this case! this is why we don't use equals for this! return false; } + + // If we have the file contents, we directly compare the contents. 
This is useful to compare segment info + // files of source-only snapshots where the original segment info file shares the same id as the source-only + // segment info file but its contents are different. + if (hashEqualsContents()) { + return hash.equals(other.hash); + } + + if (writerUuid.length > 0 && other.writerUuid.length > 0) { + // if the writer ID is missing on one of the files then we ignore this field and just rely on the checksum and hash, but if + // it's present on both files then it must be identical + if (writerUuid.equals(other.writerUuid) == false) { + return false; + } else { + assert name.equals(other.name) && length == other.length && checksum.equals(other.checksum) : this + " vs " + other; + assert hash.equals(other.hash) : this + " vs " + other + " with hashes " + hash + " vs " + other.hash; + } + } return length == other.length && checksum.equals(other.checksum) && hash.equals(other.hash); } @@ -142,6 +181,42 @@ public BytesRef hash() { return hash; } + /** + * Returns the globally-unique ID that was assigned by the {@link IndexWriter} that originally wrote this file: + * + * - For `segments_N` files this is {@link SegmentInfos#getId()} which uniquely identifies the commit. + * - For non-generational segment files this is {@link SegmentInfo#getId()} which uniquely identifies the segment. + * - For generational segment files (i.e. updated docvalues, liv files etc) this is {@link SegmentCommitInfo#getId()} + * which uniquely identifies the generation of the segment. + * + * This ID may be {@link StoreFileMetadata#UNAVAILABLE_WRITER_UUID} (i.e. zero-length) if unavilable, e.g.: + * + * - The file was written by a version of Lucene prior to {@link org.apache.lucene.util.Version#LUCENE_8_6_0}. + * - The metadata came from a version of Elasticsearch prior to {@link StoreFileMetadata#WRITER_UUID_MIN_VERSION}). + * - The file is not one of the files listed above. 
+ * + */ + public BytesRef writerUuid() { + return writerUuid; + } + + static BytesRef toWriterUuid(BytesRef bytesRef) { + if (bytesRef.length == 0) { + return UNAVAILABLE_WRITER_UUID; + } else { + return bytesRef; + } + } + + static BytesRef toWriterUuid(@Nullable byte[] id) { + if (id == null) { + return UNAVAILABLE_WRITER_UUID; + } else { + assert id.length > 0; + return new BytesRef(id); + } + } + private static boolean assertValidWrittenBy(String writtenBy) { try { Version.parse(writtenBy); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index eb910c55c3994..91cfeae40da8c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2757,6 +2757,7 @@ public void snapshotShard(SnapshotShardContext context) { final ShardGeneration indexGeneration; final boolean writeShardGens = SnapshotsService.useShardGenerations(context.getRepositoryMetaVersion()); + final boolean writeFileInfoWriterUUID = SnapshotsService.includeFileInfoWriterUUID(context.getRepositoryMetaVersion()); // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones List newSnapshotsList = new ArrayList<>(); newSnapshotsList.add(new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, context.stateIdentifier())); @@ -2771,11 +2772,16 @@ public void snapshotShard(SnapshotShardContext context) { // reference a generation that has not had all its files fully upload. indexGeneration = ShardGeneration.newGeneration(); try { + final Map serializationParams = Collections.singletonMap( + BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, + Boolean.toString(writeFileInfoWriterUUID) + ); INDEX_SHARD_SNAPSHOTS_FORMAT.write( updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration.toBlobNamePart(), - compress + compress, + serializationParams ); } catch (IOException e) { throw new IndexShardSnapshotFailedException( @@ -2809,7 +2815,11 @@ public void snapshotShard(SnapshotShardContext context) { + blobsToDelete; afterWriteSnapBlob = () -> { try { - writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots); + final Map serializationParams = Collections.singletonMap( + BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, + Boolean.toString(writeFileInfoWriterUUID) + ); + writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots, serializationParams); } catch (IOException e) { throw new IndexShardSnapshotFailedException( shardId, @@ -2853,7 +2863,17 @@ public void snapshotShard(SnapshotShardContext context) { ); try { final String snapshotUUID = snapshotId.getUUID(); - INDEX_SHARD_SNAPSHOT_FORMAT.write(blobStoreIndexShardSnapshot, shardContainer, snapshotUUID, compress); + final Map serializationParams = Collections.singletonMap( + BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, + Boolean.toString(writeFileInfoWriterUUID) + ); + INDEX_SHARD_SNAPSHOT_FORMAT.write( + blobStoreIndexShardSnapshot, + shardContainer, + snapshotUUID, + compress, + serializationParams + ); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); } @@ -3255,7 +3275,7 @@ private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, 
writtenGeneration.toBlobNamePart(), compress); } else { writtenGeneration = new ShardGeneration(indexGeneration); - writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots); + writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots, Collections.emptyMap()); } final Set survivingSnapshotUUIDs = survivingSnapshots.stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); return new ShardSnapshotMetaDeleteResult( @@ -3285,7 +3305,8 @@ private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( private void writeShardIndexBlobAtomic( BlobContainer shardContainer, long indexGeneration, - BlobStoreIndexShardSnapshots updatedSnapshots + BlobStoreIndexShardSnapshots updatedSnapshots, + Map serializationParams ) throws IOException { assert indexGeneration >= 0 : "Shard generation must not be negative but saw [" + indexGeneration + "]"; logger.trace( @@ -3295,7 +3316,7 @@ private void writeShardIndexBlobAtomic( writeAtomic( shardContainer, blobName, - out -> INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compress, out), + out -> INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compress, serializationParams, out), true ); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 9882cd89caa61..79a83205cbac9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -38,6 +38,7 @@ import java.io.OutputStream; import java.util.Collections; import java.util.Locale; +import java.util.Map; import java.util.zip.CRC32; /** @@ -263,7 +264,7 @@ private int getAvailable() throws IOException { /** * Writes blob with resolving the blob name using {@link #blobName} method. *

    - * The blob will optionally by compressed. + * The blob will optionally be compressed. * * @param obj object to be serialized * @param blobContainer blob container @@ -271,11 +272,37 @@ private int getAvailable() throws IOException { * @param compress whether to use compression */ public void write(T obj, BlobContainer blobContainer, String name, boolean compress) throws IOException { + write(obj, blobContainer, name, compress, Collections.emptyMap()); + } + + /** + * Writes blob with resolving the blob name using {@link #blobName} method. + *

    + * The blob will optionally be compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compress whether to use compression + * @param serializationParams extra serialization parameters + */ + public void write(T obj, BlobContainer blobContainer, String name, boolean compress, Map serializationParams) + throws IOException { final String blobName = blobName(name); - blobContainer.writeBlob(blobName, false, false, out -> serialize(obj, blobName, compress, out)); + blobContainer.writeBlob(blobName, false, false, out -> serialize(obj, blobName, compress, serializationParams, out)); } - public void serialize(final T obj, final String blobName, final boolean compress, OutputStream outputStream) throws IOException { + public void serialize(final T obj, final String blobName, final boolean compress, final OutputStream outputStream) throws IOException { + serialize(obj, blobName, compress, Collections.emptyMap(), outputStream); + } + + public void serialize( + final T obj, + final String blobName, + final boolean compress, + final Map extraParams, + final OutputStream outputStream + ) throws IOException { try ( OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( "ChecksumBlobStoreFormat.serialize(blob=\"" + blobName + "\")", @@ -297,8 +324,12 @@ public void close() { compress ? CompressorFactory.COMPRESSOR.threadLocalOutputStream(indexOutputOutputStream) : indexOutputOutputStream ) ) { + ToXContent.Params params = extraParams.isEmpty() + ? SNAPSHOT_ONLY_FORMAT_PARAMS + : new ToXContent.DelegatingMapParams(extraParams, SNAPSHOT_ONLY_FORMAT_PARAMS); + builder.startObject(); - obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + obj.toXContent(builder, params); builder.endObject(); } CodecUtil.writeFooter(indexOutput); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 95eab2d06dd82..33833b300fd36 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -139,6 +139,9 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public static final Version UUIDS_IN_REPO_DATA_VERSION = Version.V_7_12_0; + // TODO: Update to 7.16 after backporting + public static final Version FILE_INFO_WRITER_UUIDS_IN_SHARD_DATA_VERSION = Version.CURRENT; + public static final Version OLD_SNAPSHOT_FORMAT = Version.V_7_5_0; public static final Version MULTI_DELETE_VERSION = Version.V_7_8_0; @@ -3000,6 +3003,10 @@ public static boolean includesUUIDs(Version repositoryMetaVersion) { return repositoryMetaVersion.onOrAfter(UUIDS_IN_REPO_DATA_VERSION); } + public static boolean includeFileInfoWriterUUID(Version repositoryMetaVersion) { + return repositoryMetaVersion.onOrAfter(FILE_INFO_WRITER_UUIDS_IN_SHARD_DATA_VERSION); + } + /** Deletes snapshot from repository * * @param deleteEntry delete entry in cluster state diff --git a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index be78fa36fa47b..3dc9e6474d2b6 100644 --- a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.test.ESTestCase; import 
java.io.IOException; +import java.util.Collections; +import static org.elasticsearch.index.store.StoreFileMetadata.UNAVAILABLE_WRITER_UUID; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -35,6 +37,7 @@ public class FileInfoTests extends ESTestCase { public void testToFromXContent() throws IOException { final int iters = scaledRandomIntBetween(1, 10); for (int iter = 0; iter < iters; iter++) { + final BytesRef writerUuid = randomBytesRef(20); final BytesRef hash = new BytesRef(scaledRandomIntBetween(0, 1024 * 1024)); hash.length = hash.bytes.length; for (int i = 0; i < hash.length; i++) { @@ -45,12 +48,23 @@ public void testToFromXContent() throws IOException { Math.abs(randomLong()), randomAlphaOfLengthBetween(1, 10), Version.LATEST.toString(), - hash + hash, + writerUuid ); ByteSizeValue size = new ByteSizeValue(Math.abs(randomLong())); BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); - BlobStoreIndexShardSnapshot.FileInfo.toXContent(info, builder, ToXContent.EMPTY_PARAMS); + boolean serializeWriterUUID = randomBoolean(); + final ToXContent.Params params; + if (serializeWriterUUID && randomBoolean()) { + // We serialize by the writer uuid by default + params = new ToXContent.MapParams(Collections.emptyMap()); + } else { + params = new ToXContent.MapParams( + Collections.singletonMap(FileInfo.SERIALIZE_WRITER_UUID, Boolean.toString(serializeWriterUUID)) + ); + } + BlobStoreIndexShardSnapshot.FileInfo.toXContent(info, builder, params); byte[] xcontent = BytesReference.toBytes(BytesReference.bytes(shuffleXContent(builder))); final BlobStoreIndexShardSnapshot.FileInfo parsedInfo; @@ -66,18 +80,27 @@ public void testToFromXContent() throws IOException { assertThat(parsedInfo.metadata().hash().length, equalTo(hash.length)); assertThat(parsedInfo.metadata().hash(), equalTo(hash)); assertThat(parsedInfo.metadata().writtenBy(), equalTo(Version.LATEST.toString())); + if (serializeWriterUUID) { + assertThat(parsedInfo.metadata().writerUuid(), equalTo(writerUuid)); + } else { + assertThat(parsedInfo.metadata().writerUuid(), equalTo(UNAVAILABLE_WRITER_UUID)); + } assertThat(parsedInfo.isSame(info.metadata()), is(true)); } } + private static BytesRef randomBytesRef(int maxSize) { + final BytesRef hash = new BytesRef(scaledRandomIntBetween(1, maxSize)); + hash.length = hash.bytes.length; + for (int i = 0; i < hash.length; i++) { + hash.bytes[i] = randomByte(); + } + return hash; + } + public void testInvalidFieldsInFromXContent() throws IOException { final int iters = scaledRandomIntBetween(1, 10); for (int iter = 0; iter < iters; iter++) { - final BytesRef hash = new BytesRef(scaledRandomIntBetween(0, 1024 * 1024)); - hash.length = hash.bytes.length; - for (int i = 0; i < hash.length; i++) { - hash.bytes[i] = randomByte(); - } String name = "foobar"; String physicalName = "_foobar"; String failure = null; diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 3bc8d8b3688f4..6940b81e14eee 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.CodecUtil; +import 
org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; @@ -475,11 +476,16 @@ public void testRecoveryDiff() throws IOException, InterruptedException { List docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", - TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); + final Field.Store stringFieldStored = random().nextBoolean() ? Field.Store.YES : Field.Store.NO; + doc.add(new StringField("id", "" + i, stringFieldStored)); + final String textFieldContent = TestUtil.randomRealisticUnicodeString(random()); + final Field.Store textFieldStored = random().nextBoolean() ? Field.Store.YES : Field.Store.NO; + doc.add(new TextField("body", textFieldContent, textFieldStored)); + final String docValueFieldContent = TestUtil.randomRealisticUnicodeString(random()); + doc.add(new BinaryDocValuesField("dv", new BytesRef(docValueFieldContent))); docs.add(doc); + logger.info("--> doc [{}] id=[{}] (store={}) body=[{}] (store={}) dv=[{}]", + i, i, stringFieldStored, textFieldContent, textFieldStored, docValueFieldContent); } long seed = random().nextLong(); Store.MetadataSnapshot first; @@ -494,9 +500,8 @@ public void testRecoveryDiff() throws IOException, InterruptedException { final boolean lotsOfSegments = rarely(random); for (Document d : docs) { writer.addDocument(d); - if (lotsOfSegments && random.nextBoolean()) { - writer.commit(); - } else if (rarely(random)) { + if (lotsOfSegments && random.nextBoolean() || rarely(random)) { + logger.info("--> commit after doc {}", d.getField("id").stringValue()); writer.commit(); } } @@ -523,9 +528,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { final boolean lotsOfSegments = rarely(random); for (Document d : docs) { writer.addDocument(d); - if (lotsOfSegments && random.nextBoolean()) { - writer.commit(); - } else if (rarely(random)) { + if (lotsOfSegments && random.nextBoolean() || rarely(random)) { writer.commit(); } } @@ -550,35 +553,40 @@ public void testRecoveryDiff() throws IOException, InterruptedException { assertThat(selfDiff.different, empty()); assertThat(selfDiff.missing, empty()); - - // lets add some deletes - Random random = new Random(seed); - IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); - iwc.setMergePolicy(NoMergePolicy.INSTANCE); - iwc.setUseCompoundFile(random.nextBoolean()); - iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - IndexWriter writer = new IndexWriter(store.directory(), iwc); - writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs)))); - writer.commit(); - writer.close(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + // delete a doc + final String deleteId = Integer.toString(random().nextInt(numDocs)); + Store.MetadataSnapshot metadata; + { + Random random = new Random(seed); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setUseCompoundFile(random.nextBoolean()); + iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); + 
IndexWriter writer = new IndexWriter(store.directory(), iwc); + logger.info("--> delete doc {}", deleteId); + writer.deleteDocuments(new Term("id", deleteId)); + writer.commit(); + writer.close(); + metadata = store.getMetadata(null); + } StoreFileMetadata delFile = null; for (StoreFileMetadata md : metadata) { if (md.name().endsWith(".liv")) { delFile = md; + logger.info("--> delFile=[{}]", delFile); break; } } Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second); if (delFile != null) { - assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file - assertThat(afterDeleteDiff.different.size(), equalTo(0)); - assertThat(afterDeleteDiff.missing.size(), equalTo(2)); + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.different.size(), equalTo(0)); + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.missing.size(), equalTo(2)); } else { // an entire segment must be missing (single doc segment got dropped) - assertThat(afterDeleteDiff.identical.size(), greaterThan(0)); - assertThat(afterDeleteDiff.different.size(), equalTo(0)); - assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.identical.size(), greaterThan(0)); + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.different.size(), equalTo(0)); + assertThat(afterDeleteDiff.toString(), afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different } // check the self diff @@ -588,30 +596,70 @@ public void testRecoveryDiff() throws IOException, InterruptedException { assertThat(selfDiff.missing, empty()); // add a new commit - iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); - iwc.setMergePolicy(NoMergePolicy.INSTANCE); - iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files - iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - writer = new IndexWriter(store.directory(), iwc); - writer.addDocument(docs.get(0)); - writer.close(); + { + Random random = new Random(seed); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files + iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); + IndexWriter writer = new IndexWriter(store.directory(), iwc); + logger.info("--> add new empty doc"); + writer.addDocument(new Document()); + writer.close(); + } Store.MetadataSnapshot newCommitMetadata = store.getMetadata(null); Store.RecoveryDiff newCommitDiff = newCommitMetadata.recoveryDiff(metadata); if (delFile != null) { - assertThat(newCommitDiff.identical.size(), - equalTo(newCommitMetadata.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment - assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different - assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv")); - assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment + assertThat(newCommitDiff.toString(), newCommitDiff.identical.size(), + equalTo(newCommitMetadata.size() - 4)); // segments_N, cfs, cfe, si for the new segment + assertThat(newCommitDiff.toString(), newCommitDiff.different.size(), equalTo(0)); // the 
del file must be different + assertThat(newCommitDiff.toString(), newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment + assertTrue(newCommitDiff.toString(), newCommitDiff.identical.stream().anyMatch(m -> m.name().endsWith(".liv"))); } else { - assertThat(newCommitDiff.identical.size(), + assertThat(newCommitDiff.toString(), newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 4)); // segments_N, cfs, cfe, si for the new segment - assertThat(newCommitDiff.different.size(), equalTo(0)); - assertThat(newCommitDiff.missing.size(), + assertThat(newCommitDiff.toString(), newCommitDiff.different.size(), equalTo(0)); + assertThat(newCommitDiff.toString(), newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different } + // update doc values + Store.MetadataSnapshot dvUpdateSnapshot; + final String updateId = randomValueOtherThan(deleteId, () -> Integer.toString(random().nextInt(numDocs))); + { + Random random = new Random(seed); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setUseCompoundFile(random.nextBoolean()); + iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); + try(IndexWriter writer = new IndexWriter(store.directory(), iwc)) { + final String newDocValue = TestUtil.randomRealisticUnicodeString(random()); + logger.info("--> update doc [{}] with dv=[{}]", updateId, newDocValue); + writer.updateBinaryDocValue(new Term("id", updateId), "dv", new BytesRef(newDocValue)); + writer.commit(); + } + dvUpdateSnapshot = store.getMetadata(null); + } + logger.info("--> source: {}", dvUpdateSnapshot.asMap()); + logger.info("--> target: {}", newCommitMetadata.asMap()); + Store.RecoveryDiff dvUpdateDiff = dvUpdateSnapshot.recoveryDiff(newCommitMetadata); + final int delFileCount; + if (delFile == null || dvUpdateDiff.different.isEmpty()) { + // liv file either doesn't exist or belongs to a different segment from the one that we just updated + delFileCount = 0; + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.different, empty()); + } else { + // liv file is generational and belongs to the updated segment + delFileCount = 1; + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.different.size(), equalTo(1)); + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.different.get(0).name(), endsWith(".liv")); + } + + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.identical.size(), equalTo(dvUpdateSnapshot.size() - 4 - delFileCount)); + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.different.size(), equalTo(delFileCount)); + assertThat(dvUpdateDiff.toString(), dvUpdateDiff.missing.size(), equalTo(4)); // segments_N, fnm, dvd, dvm for the updated segment + deleteContent(store.directory()); IOUtils.close(store); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index 6d00b3a931ef1..077cf634ab46c 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -471,7 +471,7 @@ private void assertUsesExpectedSnapshot(ShardRecoveryPlan shardRecoveryPlan, // StoreFileMetadata doesn't implement #equals, we rely on StoreFileMetadata#isSame for 
equality checks private boolean containsFile(List files, StoreFileMetadata fileMetadata) { for (StoreFileMetadata file : files) { - if (file.isSame(fileMetadata)) { + if (fileMetadata.name().equals(file.name()) && file.isSame(fileMetadata)) { return true; } }
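
For reference, here is a minimal standalone sketch of the grouping rule that the new recoveryDiff applies, using only Lucene's IndexFileNames helpers. The class name and the example file list are made up for illustration and are not part of this patch or of Elasticsearch.

import org.apache.lucene.index.IndexFileNames;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative-only sketch: per-segment files are split into nongenerational (generation 0) and
// generational (generation > 0, e.g. .liv/.fnm/.dvd/.dvm) groups, while segments_N remains per-commit.
public class RecoveryDiffGroupingSketch {

    public static void main(String[] args) {
        List<String> fileNames = Arrays.asList("segments_5", "_0.si", "_0.cfe", "_0.cfs", "_0_1.liv", "_1.si", "_1.fdt");

        List<String> perCommitFiles = new ArrayList<>();                       // segments_N
        Map<String, List<String>> nonGenerationalBySegment = new HashMap<>();  // differing => recover whole segment
        Map<String, List<String>> generationalBySegment = new HashMap<>();     // differing => recover only these

        for (String fileName : fileNames) {
            if (fileName.startsWith("_")) {
                String segment = IndexFileNames.parseSegmentName(fileName);            // e.g. "_0_1.liv" -> "_0"
                boolean generational = IndexFileNames.parseGeneration(fileName) > 0L;  // e.g. "_0_1.liv" -> 1
                Map<String, List<String>> target = generational ? generationalBySegment : nonGenerationalBySegment;
                target.computeIfAbsent(segment, k -> new ArrayList<>()).add(fileName);
            } else {
                perCommitFiles.add(fileName);                                           // "segments_5"
            }
        }

        System.out.println("per-commit:            " + perCommitFiles);
        System.out.println("per-segment (non-gen): " + nonGenerationalBySegment);
        System.out.println("per-segment (gen):     " + generationalBySegment);
    }
}

With this input, _0's .liv file lands in the generational group, so a recovery target that already has _0's nongenerational files only needs that generational file plus the per-commit segments_N blob, which is the behaviour this change enables.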