diff --git a/Polypheny-DB.ipr b/Polypheny-DB.ipr new file mode 100644 index 0000000000..09f4d9ece6 --- /dev/null +++ b/Polypheny-DB.ipr @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.6 + + + + + + + + + + + + + diff --git a/core/src/main/java/org/polypheny/db/adapter/AdapterSettingDeserializer.java b/core/src/main/java/org/polypheny/db/adapter/AdapterSettingDeserializer.java new file mode 100644 index 0000000000..6c90cac5d7 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/adapter/AdapterSettingDeserializer.java @@ -0,0 +1,79 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.adapter; + +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonDeserializer; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; + + +// See https://stackoverflow.com/questions/19588020/gson-serialize-a-list-of-polymorphic-objects/22081826#22081826 +public class AdapterSettingDeserializer implements JsonDeserializer { + + @Override + public AbstractAdapterSetting deserialize( JsonElement json, Type typeOfT, JsonDeserializationContext context ) throws JsonParseException { + JsonObject jsonObject = json.getAsJsonObject(); + String type = jsonObject.get( "type" ).getAsString(); + String name = jsonObject.get( "name" ).getAsString(); + boolean canBeNull = jsonObject.get( "canBeNull" ).getAsBoolean(); + boolean required = jsonObject.get( "required" ).getAsBoolean(); + boolean modifiable = jsonObject.get( "modifiable" ).getAsBoolean(); + String subOf = jsonObject.has( "subOf" ) ? 
jsonObject.get( "subOf" ).getAsString() : null; + int position = jsonObject.get( "position" ).getAsInt(); + String description = null; + if ( jsonObject.get( "description" ) != null ) { + description = jsonObject.get( "description" ).getAsString(); + } + + AbstractAdapterSetting out; + switch ( type ) { + case "Integer": + int integer = jsonObject.get( "defaultValue" ).getAsInt(); + out = new AbstractAdapterSettingInteger( name, canBeNull, subOf, required, modifiable, integer, new ArrayList<>(), position ); + break; + case "String": + String string = jsonObject.get( "defaultValue" ).getAsString(); + out = new AbstractAdapterSettingString( name, canBeNull, subOf, required, modifiable, string, new ArrayList<>(), position ); + break; + case "Boolean": + boolean bool = jsonObject.get( "defaultValue" ).getAsBoolean(); + out = new AbstractAdapterSettingBoolean( name, canBeNull, subOf, required, modifiable, bool, new ArrayList<>(), position ); + break; + case "List": + List options = context.deserialize( jsonObject.get( "options" ), List.class ); + String defaultValue = context.deserialize( jsonObject.get( "defaultValue" ), String.class ); + out = new AbstractAdapterSettingList( name, canBeNull, subOf, required, modifiable, options, new ArrayList<>(), defaultValue, position ); + break; + case "Directory": + String directory = context.deserialize( jsonObject.get( "directory" ), String.class ); + String[] fileNames = context.deserialize( jsonObject.get( "fileNames" ), String[].class ); + String value = context.deserialize( jsonObject.get( "defaultValue" ), String.class ); + out = new AbstractAdapterSettingDirectory( name, value, canBeNull, subOf, required, modifiable, new ArrayList<>(), position ).setDirectory( directory ).setFileNames( fileNames ); + break; + default: + throw new RuntimeException( "Could not deserialize AdapterSetting of type " + type ); + } + out.setDescription( description ); + return out; + } + +} diff --git a/core/src/main/java/org/polypheny/db/algebra/core/DocumentAggregateCall.java b/core/src/main/java/org/polypheny/db/algebra/core/DocumentAggregateCall.java new file mode 100644 index 0000000000..5a3960a8b9 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/algebra/core/DocumentAggregateCall.java @@ -0,0 +1,84 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.algebra.core; + +import java.util.List; +import java.util.Optional; +import org.polypheny.db.algebra.AlgCollations; +import org.polypheny.db.algebra.fun.AggFunction; +import org.polypheny.db.algebra.type.AlgDataType; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.plan.AlgCluster; +import org.polypheny.db.rex.RexNode; +import org.polypheny.db.type.PolyType; + +public class DocumentAggregateCall { + + public final String name; + public final AggFunction function; + private final RexNode input; + + + public DocumentAggregateCall( String name, AggFunction function, RexNode input ) { + this.name = name; + this.function = function; + this.input = input; + } + + + public static DocumentAggregateCall create( String name, AggFunction function, RexNode input ) { + return new DocumentAggregateCall( name, function, input ); + } + + + public Optional<RexNode> getInput() { + return Optional.ofNullable( input ); + } + + + public AggregateCall toAggCall( AlgDataType rowType, AlgCluster cluster ) { + int index = rowType.getFieldNames().indexOf( name ); + return AggregateCall.create( function, false, false, List.of( index ), -1, AlgCollations.EMPTY, getType( cluster ), name ); + } + + + private AlgDataType getType( AlgCluster cluster ) { + switch ( function.getKind() ) { + case COUNT: + return cluster.getTypeFactory().createPolyType( PolyType.BIGINT ); + case SUM: + case AVG: + return cluster.getTypeFactory().createPolyType( PolyType.DOUBLE ); + default: + throw new GenericRuntimeException( "Unknown aggregate function: " + function.getKind() ); + } + } + + + public Optional<AlgDataType> requiresCast( AlgCluster cluster ) { + switch ( function.getKind() ) { + case COUNT: + return Optional.empty(); + case SUM: + case AVG: + return Optional.ofNullable( cluster.getTypeFactory().createPolyType( PolyType.DOUBLE ) ); + default: + return Optional.empty(); + } + } + +} diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableInterpretable.java b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableInterpretable.java index 196224d546..687846af7c 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableInterpretable.java +++ b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableInterpretable.java @@ -12,23 +12,6 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * - * This file incorporates code covered by the following terms: - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
*/ package org.polypheny.db.algebra.enumerable; diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableProjectRule.java b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableProjectRule.java index fea9a05c9d..8d44d36388 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableProjectRule.java +++ b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableProjectRule.java @@ -12,23 +12,6 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * - * This file incorporates code covered by the following terms: - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ package org.polypheny.db.algebra.enumerable; diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableRelTableFunctionScan.java b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableRelTableFunctionScan.java index 327821a37b..dd04d39cdb 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableRelTableFunctionScan.java +++ b/core/src/main/java/org/polypheny/db/algebra/enumerable/EnumerableRelTableFunctionScan.java @@ -108,4 +108,3 @@ private boolean isQueryable() { } } - diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/JavaTupleFormat.java b/core/src/main/java/org/polypheny/db/algebra/enumerable/JavaTupleFormat.java index 27bce081e2..a3b1a8c51f 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/JavaTupleFormat.java +++ b/core/src/main/java/org/polypheny/db/algebra/enumerable/JavaTupleFormat.java @@ -274,4 +274,3 @@ public Expression comparer() { */ public abstract Expression field( Expression expression, int field, Type fromType, Type fieldType ); } - diff --git a/core/src/main/java/org/polypheny/db/algebra/logical/common/LogicalStreamer.java b/core/src/main/java/org/polypheny/db/algebra/logical/common/LogicalStreamer.java index 10b64cf25e..388f98d78b 100644 --- a/core/src/main/java/org/polypheny/db/algebra/logical/common/LogicalStreamer.java +++ b/core/src/main/java/org/polypheny/db/algebra/logical/common/LogicalStreamer.java @@ -184,7 +184,6 @@ public static void attachFilter( AlgNode modify, AlgBuilder algBuilder, RexBuild } } - attachFilter( modify.getEntity(), algBuilder, rexBuilder, indexes ); } diff --git a/core/src/main/java/org/polypheny/db/algebra/metadata/AlgMetadataQuery.java b/core/src/main/java/org/polypheny/db/algebra/metadata/AlgMetadataQuery.java index 4374f6786d..9a57526c82 100644 --- a/core/src/main/java/org/polypheny/db/algebra/metadata/AlgMetadataQuery.java +++ b/core/src/main/java/org/polypheny/db/algebra/metadata/AlgMetadataQuery.java @@ -236,6 +236,7 @@ public Double getTupleCount( AlgNode alg 
) { rowCountHandler = revise( e.algClass, TupleCount.DEF ); } catch ( CyclicMetadataException e ) { log.warn( "Cyclic metadata detected while computing row count for {}", alg ); + return 100D; } } } diff --git a/core/src/main/java/org/polypheny/db/catalog/impl/NCatalog.java b/core/src/main/java/org/polypheny/db/catalog/impl/NCatalog.java new file mode 100644 index 0000000000..2cfedd7df9 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/catalog/impl/NCatalog.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.catalog.impl; + +import io.activej.serializer.annotations.SerializeClass; +import org.polypheny.db.catalog.impl.logical.DocumentCatalog; +import org.polypheny.db.catalog.impl.logical.GraphCatalog; +import org.polypheny.db.catalog.impl.logical.RelationalCatalog; +import org.polypheny.db.catalog.logistic.DataModel; + +@SerializeClass(subclasses = { GraphCatalog.class, RelationalCatalog.class, DocumentCatalog.class }) // required for deserialization +public interface NCatalog { + + void commit(); + + void rollback(); + + boolean hasUncommittedChanges(); + + DataModel getType(); + + default <T extends NCatalog> T unwrap( Class<T> clazz ) { + if ( !this.getClass().isAssignableFrom( clazz ) ) { + throw new RuntimeException( String.format( "Error on retrieval the %s catalog.", clazz.getSimpleName() ) ); + } + return clazz.cast( this ); + } + +} diff --git a/core/src/main/java/org/polypheny/db/catalog/impl/Persister.java b/core/src/main/java/org/polypheny/db/catalog/impl/Persister.java new file mode 100644 index 0000000000..c84d62859e --- /dev/null +++ b/core/src/main/java/org/polypheny/db/catalog/impl/Persister.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.polypheny.db.catalog.impl; + +import com.drew.lang.Charsets; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.util.PolyphenyHomeDirManager; + +public class Persister { + + ExecutorService service = Executors.newSingleThreadExecutor(); + + + private final File backup; + + + public Persister() { + this.backup = initBackupFile(); + } + + + private static File initBackupFile() { + if ( PolyphenyHomeDirManager.getInstance().getHomeFile( "catalog" ).isEmpty() ) { + PolyphenyHomeDirManager.getInstance().registerNewFolder( "catalog" ); + } + Optional folder = PolyphenyHomeDirManager.getInstance().getHomeFile( "catalog" ); + if ( !folder.map( File::isDirectory ).orElse( false ) ) { + throw new GenericRuntimeException( "There is an error with the catalog folder in the .polypheny folder." ); + } + return PolyphenyHomeDirManager.getInstance().registerNewFile( "catalog/catalog.poly" ); + } + + + public synchronized void write( String data ) { + service.execute( () -> { + try { + FileWriter writer = new FileWriter( backup, Charsets.ISO_8859_1 ); + writer.write( data ); + writer.flush(); + writer.close(); + } catch ( IOException e ) { + throw new GenericRuntimeException( e ); + } + } ); + } + + + public synchronized String read() { + StringBuilder data = new StringBuilder(); + try { + BufferedReader reader = new BufferedReader( new FileReader( backup, Charsets.ISO_8859_1 ) ); + int c; + while ( ((c = reader.read()) != -1) ) { + data.append( (char) c ); + } + reader.close(); + } catch ( IOException e ) { + throw new GenericRuntimeException( e ); + } + return data.toString(); + } + +} diff --git a/core/src/main/java/org/polypheny/db/catalog/logistic/ForeignKeyOption.java b/core/src/main/java/org/polypheny/db/catalog/logistic/ForeignKeyOption.java index 84bfaee234..f4d8d7622f 100644 --- a/core/src/main/java/org/polypheny/db/catalog/logistic/ForeignKeyOption.java +++ b/core/src/main/java/org/polypheny/db/catalog/logistic/ForeignKeyOption.java @@ -17,6 +17,7 @@ package org.polypheny.db.catalog.logistic; import lombok.NonNull; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; public enum ForeignKeyOption { NONE( -1 ), @@ -61,6 +62,6 @@ public static ForeignKeyOption parse( @NonNull String str ) { } else if ( str.equalsIgnoreCase( "SET DEFAULT" ) ) { return ForeignKeyOption.SET_DEFAULT; }*/ - throw new RuntimeException( "Unknown ForeignKeyOption with name: " + str ); + throw new GenericRuntimeException( "Unknown ForeignKeyOption with name: " + str ); } } diff --git a/core/src/main/java/org/polypheny/db/catalog/persistance/Persister.java b/core/src/main/java/org/polypheny/db/catalog/persistance/Persister.java index ff5ae0a009..63d9316626 100644 --- a/core/src/main/java/org/polypheny/db/catalog/persistance/Persister.java +++ b/core/src/main/java/org/polypheny/db/catalog/persistance/Persister.java @@ -22,4 +22,4 @@ public interface Persister { String read(); -} +} \ No newline at end of file diff --git a/core/src/main/java/org/polypheny/db/catalog/snapshot/LogicalRelSnapshot.java b/core/src/main/java/org/polypheny/db/catalog/snapshot/LogicalRelSnapshot.java index 4d95c66dfd..ba4821aa75 100644 --- a/core/src/main/java/org/polypheny/db/catalog/snapshot/LogicalRelSnapshot.java +++ 
b/core/src/main/java/org/polypheny/db/catalog/snapshot/LogicalRelSnapshot.java @@ -274,6 +274,7 @@ public interface LogicalRelSnapshot { @NotNull List getPrimaryKeys(); - @NotNull List getForeignKeys(); + @NotNull + List getForeignKeys(); } diff --git a/core/src/main/java/org/polypheny/db/catalog/snapshot/impl/LogicalDocSnapshotImpl.java b/core/src/main/java/org/polypheny/db/catalog/snapshot/impl/LogicalDocSnapshotImpl.java index 4c87f2f864..d02640dbc7 100644 --- a/core/src/main/java/org/polypheny/db/catalog/snapshot/impl/LogicalDocSnapshotImpl.java +++ b/core/src/main/java/org/polypheny/db/catalog/snapshot/impl/LogicalDocSnapshotImpl.java @@ -94,5 +94,4 @@ public LogicalDocSnapshotImpl( Map catalogs ) { } - } diff --git a/core/src/main/java/org/polypheny/db/docker/DockerUtils.java b/core/src/main/java/org/polypheny/db/docker/DockerUtils.java index 351f31e73f..93eae7ddbb 100644 --- a/core/src/main/java/org/polypheny/db/docker/DockerUtils.java +++ b/core/src/main/java/org/polypheny/db/docker/DockerUtils.java @@ -39,6 +39,7 @@ public static String normalizeHostname( String hostname ) { return newHostname; } + public static String getContainerName( DockerHost host ) { final String registryToUse = host.getRegistryOrDefault(); if ( registryToUse.isEmpty() || registryToUse.endsWith( "/" ) ) { diff --git a/core/src/main/java/org/polypheny/db/docker/PolyphenyCertificateManager.java b/core/src/main/java/org/polypheny/db/docker/PolyphenyCertificateManager.java index 03b7c28594..064528c4af 100644 --- a/core/src/main/java/org/polypheny/db/docker/PolyphenyCertificateManager.java +++ b/core/src/main/java/org/polypheny/db/docker/PolyphenyCertificateManager.java @@ -65,7 +65,7 @@ static PolyphenyKeypair loadClientKeypair( String context, String hostname ) thr PolyphenyHomeDirManager dirManager = PolyphenyHomeDirManager.getInstance(); String basePath = getBaseDirectory( context, hostname ); File clientKeyFile = dirManager.getHomeFile( basePath + "key.pem" ).orElseThrow( () -> new IOException( String.format( "Cannot read file %s", basePath + "key.pem" ) ) ); - File clientCertificateFile = dirManager.getHomeFile( basePath + "cert.pem" ).orElseThrow(() -> new IOException( String.format( "Cannot read file %s", basePath + "key.pem" ) ) ); + File clientCertificateFile = dirManager.getHomeFile( basePath + "cert.pem" ).orElseThrow( () -> new IOException( String.format( "Cannot read file %s", basePath + "key.pem" ) ) ); return PolyphenyKeypair.loadFromDisk( clientCertificateFile, clientKeyFile, RuntimeConfig.INSTANCE_UUID.getString() ); } diff --git a/core/src/main/java/org/polypheny/db/nodes/validate/ValidatorNamespace.java b/core/src/main/java/org/polypheny/db/nodes/validate/ValidatorNamespace.java index d7679dbd20..7913756c67 100644 --- a/core/src/main/java/org/polypheny/db/nodes/validate/ValidatorNamespace.java +++ b/core/src/main/java/org/polypheny/db/nodes/validate/ValidatorNamespace.java @@ -39,4 +39,4 @@ default ValidatorNamespace setDataModel( DataModel dataModel ) { } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/polypheny/db/plan/AbstractAlgPlanner.java b/core/src/main/java/org/polypheny/db/plan/AbstractAlgPlanner.java index e99b83fb1a..e3f1a8765e 100644 --- a/core/src/main/java/org/polypheny/db/plan/AbstractAlgPlanner.java +++ b/core/src/main/java/org/polypheny/db/plan/AbstractAlgPlanner.java @@ -416,4 +416,3 @@ public Iterable> subClasses( final Class c * * * - * * @param partials */ public record FullPartition(long id, List partials) implements FieldDistribution { diff --git 
a/core/src/main/java/org/polypheny/db/schema/CustomFieldResolvingEntity.java b/core/src/main/java/org/polypheny/db/schema/CustomFieldResolvingEntity.java index 3f603974ad..de3459aad6 100644 --- a/core/src/main/java/org/polypheny/db/schema/CustomFieldResolvingEntity.java +++ b/core/src/main/java/org/polypheny/db/schema/CustomFieldResolvingEntity.java @@ -68,4 +68,3 @@ List>> resolveColumn( List names ); } - diff --git a/core/src/main/java/org/polypheny/db/schema/TableType.java b/core/src/main/java/org/polypheny/db/schema/TableType.java new file mode 100644 index 0000000000..d021f91eb4 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/schema/TableType.java @@ -0,0 +1,241 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.schema; + +/** + * Table type. + */ +public enum TableType { + /** + * A regular table. + * + * Used by DB2, MySQL, PostgreSQL and others. + */ + TABLE, + + /** + * A relation whose contents are calculated by evaluating a SQL expression. + * + * Used by DB2, PostgreSQL and others. + */ + VIEW, + + /** + * Foreign table. + * + * Used by PostgreSQL. + */ + FOREIGN_TABLE, + + /** + * Index table. + * + * Used by Apache Phoenix, PostgreSQL. + */ + INDEX, + + /** + * Join table. + * + * Used by Apache Phoenix. + */ + JOIN, + + /** + * Sequence table. + * + * Used by Apache Phoenix, Oracle, PostgreSQL and others. + * In Phoenix, must have a single BIGINT column called "$seq". + */ + SEQUENCE, + + /** + * Stream. + */ + STREAM, + + /** + * Type. + * + * Used by PostgreSQL. + */ + TYPE, + + /** + * A table maintained by the system. Data dictionary tables, such as the "TABLES" and "COLUMNS" table in the "metamodel" schema, examples of system tables. + * + * Specified by the JDBC standard and used by DB2, MySQL, Oracle, PostgreSQL and others. + */ + SYSTEM_TABLE, + + /** + * System view. + * + * Used by PostgreSQL, MySQL. + */ + SYSTEM_VIEW, + + /** + * System index. + * + * Used by PostgreSQL. + */ + SYSTEM_INDEX, + + /** + * System TOAST index. + * + * Used by PostgreSQL. + */ + SYSTEM_TOAST_INDEX, + + /** + * System TOAST table. + * + * Used by PostgreSQL. + */ + SYSTEM_TOAST_TABLE, + + /** + * Temporary index. + * + * Used by PostgreSQL. + */ + TEMPORARY_INDEX, + + /** + * Temporary sequence. + * + * Used by PostgreSQL. + */ + TEMPORARY_SEQUENCE, + + /** + * Temporary table. + * + * Used by PostgreSQL. + */ + TEMPORARY_TABLE, + + /** + * Temporary view. + * + * Used by PostgreSQL. + */ + TEMPORARY_VIEW, + + /** + * A table that is only visible to one connection. + * + * Specified by the JDBC standard and used by PostgreSQL, MySQL. + */ + LOCAL_TEMPORARY, + + /** + * A synonym. + * + * Used by DB2, Oracle. + */ + SYNONYM, + + /** + * An alias. + * + * Specified by the JDBC standard. + */ + ALIAS, + + /** + * A global temporary table. + * + * Specified by the JDBC standard. + */ + GLOBAL_TEMPORARY, + + /** + * An accel-only table. + * + * Used by DB2. 
+ */ + ACCEL_ONLY_TABLE, + + /** + * An auxiliary table. + * + * Used by DB2. + */ + AUXILIARY_TABLE, + + /** + * A global temporary table. + * + * Used by DB2. + */ + GLOBAL_TEMPORARY_TABLE, + + /** + * A hierarchy table. + * + * Used by DB2. + */ + HIERARCHY_TABLE, + + /** + * An inoperative view. + * + * Used by DB2. + */ + INOPERATIVE_VIEW, + + /** + * A nickname. + * + * Used by DB2. + */ + NICKNAME, + + /** + * A typed table. + * + * Used by DB2. + */ + TYPED_TABLE, + + /** + * A typed view. + * + * Used by DB2. + */ + TYPED_VIEW, + + /** + * Table type not known to Polypheny-DB. + * + * If you get one of these, please fix the problem by adding an enum value. + */ + OTHER; + + /** + * The name used in JDBC. For example "SYSTEM TABLE" rather than "SYSTEM_TABLE". + */ + public final String jdbcName; + + + TableType() { + this.jdbcName = name().replace( '_', ' ' ); + } +} diff --git a/core/src/main/java/org/polypheny/db/schema/types/QueryableEntity.java b/core/src/main/java/org/polypheny/db/schema/types/QueryableEntity.java index 86c13bcb35..d4116551cd 100644 --- a/core/src/main/java/org/polypheny/db/schema/types/QueryableEntity.java +++ b/core/src/main/java/org/polypheny/db/schema/types/QueryableEntity.java @@ -33,4 +33,4 @@ default Type getElementType() { return PolyValue[].class; } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/polypheny/db/type/entity/temporal/PolyTimestamp.java b/core/src/main/java/org/polypheny/db/type/entity/temporal/PolyTimestamp.java index 69906ce999..58be3ce4c7 100644 --- a/core/src/main/java/org/polypheny/db/type/entity/temporal/PolyTimestamp.java +++ b/core/src/main/java/org/polypheny/db/type/entity/temporal/PolyTimestamp.java @@ -190,5 +190,9 @@ public String toString() { return dateString; } + public String toHumanReadable() { + return dateFormat.format( new Date( getMillisSinceEpoch() ) ); + } + } diff --git a/core/src/main/resources/org/polypheny/db/runtime/PolyphenyDbResource.properties b/core/src/main/resources/org/polypheny/db/runtime/PolyphenyDbResource.properties index 7e9ed5c302..98ac62e7d0 100644 --- a/core/src/main/resources/org/polypheny/db/runtime/PolyphenyDbResource.properties +++ b/core/src/main/resources/org/polypheny/db/runtime/PolyphenyDbResource.properties @@ -77,30 +77,30 @@ ColumnNotNullable=Column ''{0}'' has no default value and does not allow NULLs TypeNotAssignable=Cannot assign to target field ''{0}'' of type {1} from source field ''{2}'' of type {3} ExceededCardinality=Array in column ''{0}'' with cardinality {1,number} exceeds max-cardinality of {2,number} ExceededDimension=Array in column ''{0}'' with dimension {1,number} exceeds max-dimension of {2,number} -DatabaseNotFound=Database ''{0}'' not found -NotValidJson=Value ''{0}'' is not valid JSON; {1} +DatabaseNotFound = Database ''{0}'' not found +NotValidJson = Value ''{0}'' is not valid JSON; {1} EntityNameNotFound = Entity ''{0}'' not found EntityNotFound = Entity ''{0}'' not found EntityNameNotFoundDidYouMean = Entity ''{0}'' not found; did you mean ''{1}''? -ObjectNotFound=Object ''{0}'' not found -ObjectNotFoundWithin=Object ''{0}'' not found within ''{1}'' -ObjectNotFoundDidYouMean=Object ''{0}'' not found; did you mean ''{1}''? -ObjectNotFoundWithinDidYouMean=Object ''{0}'' not found within ''{1}''; did you mean ''{2}''? -NotASequence=Entity ''{0}'' is not a sequence +ObjectNotFound = Object ''{0}'' not found +ObjectNotFoundWithin = Object ''{0}'' not found within ''{1}'' +ObjectNotFoundDidYouMean = Object ''{0}'' not found; did you mean ''{1}''? 
+ObjectNotFoundWithinDidYouMean = Object ''{0}'' not found within ''{1}''; did you mean ''{2}''? +NotASequence = Entity ''{0}'' is not a sequence FieldNotFound = Field ''{0}'' not found in any entity FieldNotFoundDidYouMean = Field ''{0}'' not found in any entity; did you mean ''{1}''? FieldNotFoundInEntity = Field ''{0}'' not found in entity ''{1}'' FieldNotFoundInEntityDidYouMean = Field ''{0}'' not found in entity ''{1}''; did you mean ''{2}''? -ColumnAmbiguous=Field ''{0}'' is ambiguous -NeedQueryOp=Operand {0} must be a query -NeedSameTypeParameter=Parameters must be of the same type -CanNotApplyOp2Type=Cannot apply ''{0}'' to arguments of type {1}. Supported form(s): {2} -ExpectedBoolean=Expected a boolean type -ExpectedCharacter=Expected a character type -ExpectedMultimedia=Expected a multimedia type -MustNotNullInElse=ELSE clause or at least one THEN clause must be non-NULL -FunctionUndefined=Function ''{0}'' is not defined -WrongNumberOfParam=Encountered {0} with {1,number} parameter(s); was expecting {2} +ColumnAmbiguous = Field ''{0}'' is ambiguous +NeedQueryOp = Operand {0} must be a query +NeedSameTypeParameter = Parameters must be of the same type +CanNotApplyOp2Type = Cannot apply ''{0}'' to arguments of type {1}. Supported form(s): {2} +ExpectedBoolean = Expected a boolean type +ExpectedCharacter = Expected a character type +ExpectedMultimedia = Expected a multimedia type +MustNotNullInElse = ELSE clause or at least one THEN clause must be non-NULL +FunctionUndefined = Function ''{0}'' is not defined +WrongNumberOfParam = Encountered {0} with {1,number} parameter(s); was expecting {2} IllegalMixingOfTypes=Illegal mixing of types in CASE or COALESCE statement InvalidCompare=Invalid compare. Comparing (collation, coercibility): ({0}, {1} with ({2}, {3}) is illegal DifferentCollations=Invalid syntax. 
Two explicit different collations ({0}, {1}) are illegal @@ -112,14 +112,14 @@ TypeNotComparableEachOther=Types {0} not comparable to each other NumberLiteralOutOfRange=Numeric literal ''{0}'' out of range DateLiteralOutOfRange=Date literal ''{0}'' out of range StringFragsOnSameLine=String literal continued on same line -AliasMustBeSimpleIdentifier=Entity or field alias must be a simple identifier -AliasListDegree=List of field aliases must have same degree as entity; entity has {0,number,#} fields {1}, whereas alias list has {2,number,#} fields -AliasListDuplicate=Duplicate name ''{0}'' in field alias list +AliasMustBeSimpleIdentifier = Entity or field alias must be a simple identifier +AliasListDegree = List of field aliases must have same degree as entity; entity has {0,number,#} fields {1}, whereas alias list has {2,number,#} fields +AliasListDuplicate = Duplicate name ''{0}'' in field alias list JoinRequiresCondition=INNER, LEFT, RIGHT or FULL join requires a condition (NATURAL keyword or ON or USING clause) CrossJoinDisallowsCondition=Cannot specify condition (NATURAL keyword, or ON or USING clause) following CROSS JOIN NaturalDisallowsOnOrUsing=Cannot specify NATURAL keyword with ON or USING clause -ColumnInUsingNotUnique=Field name ''{0}'' in USING clause is not unique on one side of join -NaturalOrUsingColumnNotCompatible=Field ''{0}'' matched using NATURAL keyword or USING clause has incompatible types: cannot compare ''{1}'' to ''{2}'' +ColumnInUsingNotUnique = Field name ''{0}'' in USING clause is not unique on one side of join +NaturalOrUsingColumnNotCompatible = Field ''{0}'' matched using NATURAL keyword or USING clause has incompatible types: cannot compare ''{1}'' to ''{2}'' WindowNotFound=Window ''{0}'' not found NotGroupExpr=Expression ''{0}'' is not being grouped GroupingArgument=Argument to {0} operator must be a grouped expression @@ -195,9 +195,9 @@ ArgumentMustNotBeNull=Argument to function ''{0}'' must not be NULL NullIllegal=Illegal use of ''NULL'' DynamicParamIllegal=Illegal use of dynamic parameter InvalidBoolean=''{0}'' is not a valid boolean value -ArgumentMustBeValidPrecision=Argument to function ''{0}'' must be a valid precision between ''{1,number,#}'' and ''{2,number,#}'' -IllegalArgumentForTableFunctionCall=Wrong arguments for entity function ''{0}'' call. Expected ''{1}'', actual ''{2}'' -InvalidDatetimeFormat=''{0}'' is not a valid datetime format +ArgumentMustBeValidPrecision = Argument to function ''{0}'' must be a valid precision between ''{1,number,#}'' and ''{2,number,#}'' +IllegalArgumentForTableFunctionCall = Wrong arguments for entity function ''{0}'' call. 
Expected ''{1}'', actual ''{2}'' +InvalidDatetimeFormat = ''{0}'' is not a valid datetime format InsertIntoAlwaysGenerated=Cannot INSERT into generated column ''{0}'' ArgumentMustHaveScaleZero=Argument to function ''{0}'' must have a scale of 0 PreparationAborted=Statement preparation aborted @@ -209,12 +209,12 @@ SQLFeature_F302=INTERSECT not supported SQLFeature_F312=MERGE not supported SQLFeature_S271=Basic multiset not supported SQLFeature_T613=TABLESAMPLE not supported -SQLConformance_MultipleActiveAutocommitStatements=Execution of a new autocommit statement while a cursor is still open on the same connection is not supported +SQLConformance_MultipleActiveAutocommitStatements = Execution of a new autocommit statement while a cursor is still open on the same connection is not supported SQLConformance_OrderByDesc=Descending sort (ORDER BY DESC) not supported SharedStatementPlans=Sharing of cached statement plans not supported -SQLFeatureExt_T613_Substitution=TABLESAMPLE SUBSTITUTE not supported -PersonalityManagesRowCount=Personality does not maintain entity''s row count in the catalog -PersonalitySupportsSnapshots=Personality does not support snapshot reads +SQLFeatureExt_T613_Substitution = TABLESAMPLE SUBSTITUTE not supported +PersonalityManagesRowCount = Personality does not maintain entity''s row count in the catalog +PersonalitySupportsSnapshots = Personality does not support snapshot reads PersonalitySupportsLabels=Personality does not support labels DuplicateNameInColumnList=Duplicate name ''{0}'' in column list RequireAtLeastOneArg=Require at least 1 argument @@ -227,28 +227,28 @@ RequireDefaultConstructor=Declaring class ''{0}'' of non-static user-defined fun FirstParameterOfAdd=In user-defined aggregate class ''{0}'', first parameter to ''add'' method must be the accumulator (the return type of the ''init'' method) FilterableEntityInventedFilter = FilterableEntity.scan returned a filter that was not in the original list: {0} FilterableScanReturnedNull = FilterableEntity.scan must not return null -CannotConvertToStream=Cannot convert entity ''{0}'' to stream -CannotConvertToRelation=Cannot convert stream ''{0}'' to relation -StreamMustGroupByMonotonic=Streaming aggregation requires at least one monotonic expression in GROUP BY clause -StreamMustOrderByMonotonic=Streaming ORDER BY must start with monotonic expression -StreamSetOpInconsistentInputs=Set operator cannot combine streaming and non-streaming inputs -CannotStreamValues=Cannot stream VALUES -CyclicDefinition=Cannot resolve ''{0}''; it references view ''{1}'', whose definition is cyclic -ModifiableViewMustBeBasedOnSingleTable=Modifiable view must be based on a single entity -ModifiableViewMustHaveOnlyEqualityPredicates=Modifiable view must be predicated only on equality expressions -MoreThanOneMappedColumn=View is not modifiable. More than one expression maps to column ''{0}'' of base entity ''{1}'' -NoValueSuppliedForViewColumn=View is not modifiable. No value is supplied for NOT NULL column ''{0}'' of base entity ''{1}'' -ViewConstraintNotSatisfied=Modifiable view constraint is not satisfied for column ''{0}'' of base entity ''{1}'' -StarRequiresRecordType=Not a record type. The ''*'' operator requires a record -FilterMustBeBoolean=FILTER expression must be of type BOOLEAN -CannotStreamResultsForNonStreamingInputs=Cannot stream results of a query with no streaming inputs: ''{0}''. 
At least one input should be convertible to a stream -MinusNotAllowed=MINUS is not allowed under the current SQL conformance level -SelectMissingFrom=SELECT must have a FROM clause -GroupFunctionMustAppearInGroupByClause=Group function ''{0}'' can only appear in GROUP BY clause -AuxiliaryWithoutMatchingGroupCall=Call to auxiliary group function ''{0}'' must have matching call to group function ''{1}'' in GROUP BY clause -PatternVarAlreadyDefined=Pattern variable ''{0}'' has already been defined -PatternPrevFunctionInMeasure=Cannot use PREV/NEXT in MEASURE ''{0}'' -PatternPrevFunctionOrder=Cannot nest PREV/NEXT under LAST/FIRST ''{0}'' +CannotConvertToStream = Cannot convert entity ''{0}'' to stream +CannotConvertToRelation = Cannot convert stream ''{0}'' to relation +StreamMustGroupByMonotonic = Streaming aggregation requires at least one monotonic expression in GROUP BY clause +StreamMustOrderByMonotonic = Streaming ORDER BY must start with monotonic expression +StreamSetOpInconsistentInputs = Set operator cannot combine streaming and non-streaming inputs +CannotStreamValues = Cannot stream VALUES +CyclicDefinition = Cannot resolve ''{0}''; it references view ''{1}'', whose definition is cyclic +ModifiableViewMustBeBasedOnSingleTable = Modifiable view must be based on a single entity +ModifiableViewMustHaveOnlyEqualityPredicates = Modifiable view must be predicated only on equality expressions +MoreThanOneMappedColumn = View is not modifiable. More than one expression maps to column ''{0}'' of base entity ''{1}'' +NoValueSuppliedForViewColumn = View is not modifiable. No value is supplied for NOT NULL column ''{0}'' of base entity ''{1}'' +ViewConstraintNotSatisfied = Modifiable view constraint is not satisfied for column ''{0}'' of base entity ''{1}'' +StarRequiresRecordType = Not a record type. The ''*'' operator requires a record +FilterMustBeBoolean = FILTER expression must be of type BOOLEAN +CannotStreamResultsForNonStreamingInputs = Cannot stream results of a query with no streaming inputs: ''{0}''. 
At least one input should be convertible to a stream +MinusNotAllowed = MINUS is not allowed under the current SQL conformance level +SelectMissingFrom = SELECT must have a FROM clause +GroupFunctionMustAppearInGroupByClause = Group function ''{0}'' can only appear in GROUP BY clause +AuxiliaryWithoutMatchingGroupCall = Call to auxiliary group function ''{0}'' must have matching call to group function ''{1}'' in GROUP BY clause +PatternVarAlreadyDefined = Pattern variable ''{0}'' has already been defined +PatternPrevFunctionInMeasure = Cannot use PREV/NEXT in MEASURE ''{0}'' +PatternPrevFunctionOrder = Cannot nest PREV/NEXT under LAST/FIRST ''{0}'' PatternAggregationInNavigation=Cannot use aggregation in navigation ''{0}'' PatternCountFunctionArg=Invalid number of parameters to COUNT method PatternRunningFunctionInDefine=Cannot use RUNNING/FINAL in DEFINE ''{0}'' @@ -258,18 +258,18 @@ PatternFunctionNullCheck=Null parameters in ''{0}'' UnknownPattern=Unknown pattern ''{0}'' IntervalMustBeNonNegative=Interval must be non-negative ''{0}'' CannotUseWithinWithoutOrderBy=Must contain an ORDER BY clause when WITHIN is used -FirstColumnOfOrderByMustBeTimestamp=First field of ORDER BY must be of type TIMESTAMP -ExtendNotAllowed=Extended fields not allowed under the current SQL conformance level -RolledUpNotAllowed=Rolled up field ''{0}'' is not allowed in {1} -SchemaExists=Namespace ''{0}'' already exists -ColumnExists=Field ''{0}'' already exists -NotNullAndNoDefaultValue=Field ''{0}'' is defined NOT NULL and has no default value assigned -SchemaInvalidType=Invalid namespace type ''{0}''; valid values: {1} -TableExists=Entity ''{0}'' already exists -CreateTableRequiresColumnList=Missing field list -CreateTableRequiresColumnTypes=Type required for field ''{0}'' in CREATE TABLE without AS +FirstColumnOfOrderByMustBeTimestamp = First field of ORDER BY must be of type TIMESTAMP +ExtendNotAllowed = Extended fields not allowed under the current SQL conformance level +RolledUpNotAllowed = Rolled up field ''{0}'' is not allowed in {1} +SchemaExists = Namespace ''{0}'' already exists +ColumnExists = Field ''{0}'' already exists +NotNullAndNoDefaultValue = Field ''{0}'' is defined NOT NULL and has no default value assigned +SchemaInvalidType = Invalid namespace type ''{0}''; valid values: {1} +TableExists = Entity ''{0}'' already exists +CreateTableRequiresColumnList = Missing field list +CreateTableRequiresColumnTypes = Type required for field ''{0}'' in CREATE TABLE without AS ViewExists=View ''{0}'' already exists and REPLACE not specified -SchemaNotFound=Namespace ''{0}'' not found +SchemaNotFound = Namespace ''{0}'' not found UserNotFound=User ''{0}'' not found ViewNotFound=View ''{0}'' not found TypeNotFound=Type ''{0}'' not found @@ -297,21 +297,21 @@ IllegalEmptyBehaviorInJsonQueryFunc=Illegal empty behavior ''{0}'' specified in ArrayOrObjectValueRequiredInStrictModeOfJsonQueryFunc=Strict jsonpath mode requires array or object value, and the actual value is: ''{0}'' IllegalErrorBehaviorInJsonQueryFunc=Illegal error behavior ''{0}'' specified in JSON_VALUE function NullKeyOfJsonObjectNotAllowed=Null key of JSON object is not allowed -QueryExecutionTimeoutReached=Timeout of ''{0}'' ms for query execution is reached. 
Query execution started at ''{1}'' -ExceptionWhilePerformingQueryOnJdbcSubSchema=While executing SQL [{0}] on JDBC sub-namespace -UnknownStoreName=There is no data store with this name: ''{0}'' -PlacementAlreadyExists=Entity ''{0}'' is already placed on store ''{1}'' -PlacementDoesNotExist=There is no placement of entity ''{1}'' on store ''{0}'' -PlacementIsPrimaryKey=The field ''{0}'' is part of the primary key and cannot be dropped -OnlyOnePlacementLeft=There needs to be at least one placement per entity -UnknownIndexMethod=The specified data store does not support the index method ''{0}''! -MissingColumnPlacement=There is no placement of field ''{0}'' on the specified data store! -IndexPreventsRemovalOfPlacement=Unable to remove placement of field ''{0}'' because it is part of the index ''{1}''! -IndexExists=There is already an index with the name ''{0}''! -DdlOnDataSource=The adapter name ''{0}'' refers to a data source. DDL statements are not allowed for data sources! -DdlOnSourceTable=DDL statements are not allowed for tables of type source! -UnknownAdapter=There is no adapter with this unique name: ''{0}'' -UnknownCollation=There is no collation with this name: ''{0}'' +QueryExecutionTimeoutReached = Timeout of ''{0}'' ms for query execution is reached. Query execution started at ''{1}'' +ExceptionWhilePerformingQueryOnJdbcSubSchema = While executing SQL [{0}] on JDBC sub-namespace +UnknownStoreName = There is no data store with this name: ''{0}'' +PlacementAlreadyExists = Entity ''{0}'' is already placed on store ''{1}'' +PlacementDoesNotExist = There is no placement of entity ''{1}'' on store ''{0}'' +PlacementIsPrimaryKey = The field ''{0}'' is part of the primary key and cannot be dropped +OnlyOnePlacementLeft = There needs to be at least one placement per entity +UnknownIndexMethod = The specified data store does not support the index method ''{0}''! +MissingColumnPlacement = There is no placement of field ''{0}'' on the specified data store! +IndexPreventsRemovalOfPlacement = Unable to remove placement of field ''{0}'' because it is part of the index ''{1}''! +IndexExists = There is already an index with the name ''{0}''! +DdlOnDataSource = The adapter name ''{0}'' refers to a data source. DDL statements are not allowed for data sources! +DdlOnSourceTable = DDL statements are not allowed for tables of type source! 
+UnknownAdapter = There is no adapter with this unique name: ''{0}'' +UnknownCollation = There is no collation with this name: ''{0}'' UnknownQueryInterface=There is no query interface with this unique name: ''{0}'' UnknownPartitionType=There is no partition with this name: ''{0}'' -PartitionNamesNotUnique=The partition names for a field need to be unique +PartitionNamesNotUnique = The partition names for a field need to be unique diff --git a/core/src/test/java/org/polypheny/db/docker/MockCatalogDocker.java b/core/src/test/java/org/polypheny/db/docker/MockCatalogDocker.java new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java b/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java index d30c84a726..dd43c1b6f5 100644 --- a/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java +++ b/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java @@ -42,6 +42,7 @@ import org.polypheny.db.StatusNotificationService.StatusType; import org.polypheny.db.adapter.index.IndexManager; import org.polypheny.db.adapter.java.AdapterTemplate; +import org.polypheny.db.backup.BackupManager; import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.LogicalAdapter.AdapterType; import org.polypheny.db.catalog.exceptions.GenericRuntimeException; @@ -392,6 +393,9 @@ public void join( final long millis ) throws InterruptedException { // Initialize interface manager QueryInterfaceManager.initialize( transactionManager, authenticator ); + // Initialize backup manager + BackupManager backupManager = BackupManager.setAndGetInstance( new BackupManager(transactionManager) ); + // Call DockerManager once to remove old containers DockerManager.getInstance(); diff --git a/dbms/src/main/java/org/polypheny/db/backup/BackupEntityWrapper.java b/dbms/src/main/java/org/polypheny/db/backup/BackupEntityWrapper.java new file mode 100644 index 0000000000..1f87d1229c --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/BackupEntityWrapper.java @@ -0,0 +1,68 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup; + +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.backup.dependencies.EntityReferencer; +import org.polypheny.db.catalog.entity.PolyObject; + +/** + * Wraps elements to be backed up with additional information needed for the insertion step of the backup process + * @param <E> The type of element to be wrapped + */ +@Getter @Setter +public class BackupEntityWrapper<E extends PolyObject> { + + private E entityObject; + + private Boolean toBeInserted = true; + + //default, original name (change if rename needed (options)) + private String nameForQuery; + + private EntityReferencer entityReferencer; + + + /** + * Constructor for a BackupEntityWrapper + * @param entity The entity to be wrapped + * @param toBeInserted Whether the entity should be inserted or not (on restoration, default true) + * @param nameForQuery The name to be used for the entity in the restoration (insertion), the original name is preserved in the entityObject + * @param entityReferencer The entityReferencer to be used for the entity (all entities that reference this entity) + */ + public BackupEntityWrapper( E entity, Boolean toBeInserted, String nameForQuery, EntityReferencer entityReferencer ) { + this.entityObject = entity; + this.toBeInserted = toBeInserted; + this.nameForQuery = nameForQuery; + this.entityReferencer = entityReferencer; + } + + + /** + * Constructor for a BackupEntityWrapper + * @param entity The entity to be wrapped + * @param nameForQuery The name to be used for the entity in the restoration (insertion), the original name is preserved in the entityObject + * @param entityReferencer The entityReferencer to be used for the entity (all entities that reference this entity) + */ + public BackupEntityWrapper( E entity, String nameForQuery, EntityReferencer entityReferencer ) { + this.entityObject = entity; + this.nameForQuery = nameForQuery; + this.entityReferencer = entityReferencer; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/BackupInformationObject.java b/dbms/src/main/java/org/polypheny/db/backup/BackupInformationObject.java new file mode 100644 index 0000000000..e70f9e587e --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/BackupInformationObject.java @@ -0,0 +1,257 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.polypheny.db.backup; + +import com.google.common.collect.ImmutableMap; +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.backup.dependencies.BackupEntityType; +import org.polypheny.db.backup.dependencies.EntityReferencer; +import org.polypheny.db.catalog.entity.LogicalConstraint; +import org.polypheny.db.catalog.entity.logical.*; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.catalog.logistic.EntityType; +import org.polypheny.db.util.Pair; + +@Getter @Setter +/** + * This class stores all information (in the correct format) about the schema for the backup process + */ +public class BackupInformationObject { + + //ImmutableMap namespaces; + private List namespaces; + + private List relNamespaces; + + private List graphNamespaces; + + private List docNamespaces; + + //private ImmutableMap> bupRelNamespaces; + //private ImmutableMap> bupGraphNamespaces; + //private ImmutableMap> bupDocNamespaces; + + private ImmutableMap> wrappedNamespaces; + + /*//TODO(FF): Adjust/correct how the views and materialized views are collected (in gatherSchema) + @Getter @Setter + List views; + @Getter @Setter + List materializedViews; + */ + + //namespace id, list of entities for the namespace + private ImmutableMap> views; + + private ImmutableMap>> wrappedViews; + + private ImmutableMap> materializedViews; + + private ImmutableMap>> wrappedMaterializedViews; + + private ImmutableMap> tables; + + private ImmutableMap>> wrappedTables; + + private ImmutableMap> collections; + + private ImmutableMap>> wrappedCollections; + + private ImmutableMap graphs; + + private ImmutableMap>> wrappedGraphs; + + //table id, list of views for the table + private ImmutableMap> columns; + + private ImmutableMap> primaryKeysPerTable; + + private ImmutableMap> foreignKeysPerTable; + + private ImmutableMap> logicalIndexes; + + private ImmutableMap>> wrappedLogicalIndexes; + + private ImmutableMap> constraints; + + + private Boolean collectedRelSchema = false; + + private Boolean collectedDocSchema = false; + + private Boolean collectedGraphSchema = false; + + + /** + * Wraps a list of namespaces in a BackupEntityWrapper, which contains information needed for the insertion and dependencies + * @param namespaces the list of namespaces to be wrapped + * @param namespaceDependencies Map > has per namespaceId a list of namespaceIds that reference (have a dependency) the namespace (the key) + * @param tableDependencies Map > has per namespaceId a list of tableIds that reference (have a dependency) the namespace (the key) + * @param namespaceTableDependencies Map>> has per namespaceId a list of pairs of tableIds and the corresponding namespaceId that reference (have a dependency) the namespace (the key) + * @param toBeInserted boolean that indicates if the entity should be inserted (while restoring) + * @return ImmutableMap> of wrapped namespaces, where the key is the namespaceId and the Value is the wrapped namespace + */ + public ImmutableMap> wrapNamespaces( List namespaces, Map> namespaceDependencies, Map> tableDependencies, Map>> namespaceTableDependencies, Boolean toBeInserted ) { + + ImmutableMap> resultMap; + Map> tempNS = new HashMap<>(); + //BackupEntityWrapper nsBupObj = new BackupEntityWrapper<>(); + + for ( LogicalNamespace ns : namespaces ) { + /* + nsBupObj.setEntityObject( ns ); + nsBupObj.setToBeInserted( toBeInserted ); + nsBupObj.setNameForQuery( 
ns.name ); + */ + //E entity, Boolean toBeInserted, String nameForQuery, EntityReferencer entityReferencer + BackupEntityWrapper nsBupObj = new BackupEntityWrapper<>(ns, toBeInserted, ns.name, null); + tempNS.put( ns.id, nsBupObj ); + } + + resultMap = ImmutableMap.copyOf( tempNS ); + return resultMap; + } + + + /** + * Wraps each element of a list in a BackupEntityWrapper and returns a map of the wrapped namespaces + * @param namespaces the list of namespaces to be wrapped + * @param namespaceDependencies Map > has per namespaceId a list of namespaceIds that reference (have a dependency) the namespace (the key) + * @param tableDependencies Map > has per namespaceId a list of tableIds that reference (have a dependency) the namespace (the key) + * @param namespaceTableDependencies Map>> has per namespaceId a list of pairs of tableIds and the corresponding namespaceId that reference (have a dependency) the namespace (the key) + * @return ImmutableMap> of wrapped namespaces, where the key is the namespaceId and the Value is the wrapped namespace + */ + public ImmutableMap> wrapNamespaces( List namespaces, Map> namespaceDependencies, Map> tableDependencies, Map>> namespaceTableDependencies ) { + + ImmutableMap> resultMap; + Map> tempNS = new HashMap<>(); + + for ( LogicalNamespace ns : namespaces ) { + /* + BackupEntityWrapper nsBupObj = new BackupEntityWrapper<>(); + nsBupObj.setEntityObject( ns ); + nsBupObj.setNameForQuery( ns.name ); + */ + + // create entityReferences for each namespace (if there is a reference) with namespacedependencies, and add entityReferences to the backupinformationobject + if ( namespaceDependencies.containsKey( ns.id ) || namespaceTableDependencies.containsKey( ns.id ) ) { + EntityReferencer entityReferencer = new EntityReferencer( ns.id, BackupEntityType.NAMESPACE ); + if ( namespaceDependencies.containsKey( ns.id ) ) { + entityReferencer.setReferencerNamespaces( namespaceDependencies.get( ns.id ) ); + } + if ( namespaceTableDependencies.containsKey( ns.id )) { + entityReferencer.setReferencerNamespaceTablePairs( namespaceTableDependencies.get( ns.id ) ); + + // get out all the tableIds from the pairs and add them to the referencerTables list + List tempReferencerTables = new ArrayList<>(); + for ( Pair pair : namespaceTableDependencies.get( ns.id ) ) { + tempReferencerTables.add( pair.left ); + } + entityReferencer.setReferencerTables( tempReferencerTables ); + } + BackupEntityWrapper nsBupObj = new BackupEntityWrapper<>(ns, ns.name, entityReferencer); + tempNS.put( ns.id, nsBupObj ); + //nsBupObj.setEntityReferencer( entityReferencer ); + } else { + //E entity, Boolean toBeInserted, String nameForQuery, EntityReferencer entityReferencer + BackupEntityWrapper nsBupObj = new BackupEntityWrapper<>(ns, ns.name, null); + tempNS.put( ns.id, nsBupObj ); + } + } + + resultMap = ImmutableMap.copyOf( tempNS ); + return resultMap; + } + + + /** + * Wraps each element of a list in a BackupEntityWrapper and returns a map of the wrapped entity (right now only tested for tables and collections (no dependencies yet), but should work for all inheritors of LogicalEntity) + * @param entityMap the list of entities to be wrapped (where the key is the namespaceId, and the value is a list of entities for that namespace) + * @param tableDependencies Map > has per tableId a list of tableIds that reference (have a dependency) the table (the key) + * @param namespaceTableDependendencies Map>> has per tableId a list of pairs of tableIds and the corresponding namespaceId that reference 
(have a dependency) the table (the key) + * @param toBeInserted boolean that indicates if the entity should be inserted (while restoring) + * @return ImmutableMap>> of wrapped entities, where the key is the namespaceId, and the value is a list of wrapped entities (each entity individually is wrapped) for that namespace + */ + public ImmutableMap>> wrapLogicalEntities( Map> entityMap, Map> tableDependencies, Map>> namespaceTableDependendencies, Boolean toBeInserted ) { + + ImmutableMap>> resultMap; + Map>> tempMap = new HashMap<>(); + + //go through each element from entityMap, and for each list go through each element and transform it to a BupSuperEntity + for ( Map.Entry> entry : entityMap.entrySet() ) { + List entityList = entry.getValue(); + List> bupEntityList = new ArrayList<>(); + + for ( LogicalEntity entity : entityList ) { + BackupEntityWrapper tempBupEntity = new BackupEntityWrapper<>(entity, toBeInserted, entity.name, null); + /* + tempBupEntity.setEntityObject( entity ); + tempBupEntity.setToBeInserted( toBeInserted ); + tempBupEntity.setNameForQuery( entity.name ); + + */ + bupEntityList.add( tempBupEntity ); + + + // create entityReferences for each table (if there is a reference) with tableDependencies, and add entityReferences to the backupinformationobject, but only for relational entit + if (entity.getEntityType().equals( EntityType.ENTITY) && !(entity.getDataModel().equals( DataModel.DOCUMENT ) || entity.getDataModel().equals( DataModel.GRAPH ))) { + EntityReferencer entityReferencer = new EntityReferencer( entity.getId(), BackupEntityType.TABLE ); + if (tableDependencies.containsKey( entity.getId() )) { + entityReferencer.setReferencerTables( tableDependencies.get( entity.getId() ) ); + } + if (namespaceTableDependendencies.containsKey( entity.getId() )) { + entityReferencer.setReferencerNamespaceTablePairs( namespaceTableDependendencies.get( entity.getId() ) ); + } + tempBupEntity.setEntityReferencer( entityReferencer ); + } + + + } + + tempMap.put( entry.getKey(), bupEntityList ); + + } + + resultMap = ImmutableMap.copyOf( tempMap ); + return resultMap; + } + + + /** + * Collects all entityReferencers for all tables that are saved in the BackupInformationObject (needs wrapped tables) + * @return list of all entityReferencers of the BackupEntityType table + */ + public List getAllTableReferencers() { + //TODO(FF): test if this does the correct thing + List tableReferencers = new ArrayList<>(); + for ( Map.Entry>> entry : wrappedTables.entrySet() ) { + for ( BackupEntityWrapper entityWrapper : entry.getValue() ) { + if ( entityWrapper.getEntityReferencer() != null ) { + tableReferencers.add( entityWrapper.getEntityReferencer() ); + } + } + } + return tableReferencers; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java b/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java new file mode 100644 index 0000000000..91c729feb3 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/BackupManager.java @@ -0,0 +1,369 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup; + +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.backup.datagatherer.GatherEntries; +import org.polypheny.db.backup.datagatherer.GatherSchema; +import org.polypheny.db.backup.datainserter.InsertEntries; +import org.polypheny.db.backup.datainserter.InsertSchema; +import org.polypheny.db.backup.dependencies.DependencyManager; +import org.polypheny.db.backup.dependencies.EntityReferencer; +import org.polypheny.db.backup.webui.BackupCrud; +import org.polypheny.db.catalog.entity.logical.LogicalEntity; +import org.polypheny.db.catalog.entity.logical.LogicalForeignKey; +import org.polypheny.db.catalog.entity.logical.LogicalNamespace; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.EntityType; +import org.polypheny.db.information.InformationAction; +import org.polypheny.db.information.InformationGroup; +import org.polypheny.db.information.InformationManager; +import org.polypheny.db.information.InformationPage; +import org.polypheny.db.information.InformationText; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.util.Pair; +import org.polypheny.db.util.Triple; + + +@Slf4j +/** + * The BackupManager manages the backup process. It manages the data gathering, inserting and saving. + */ +public class BackupManager { + + //public static AtomicLong idCounter = new AtomicLong( 0 ); // backupid counter todo ff add id to each backup + + @Getter + private static BackupManager INSTANCE = null; + private final BackupCrud backupCrud; + private InformationPage informationPage; + private InformationGroup informationGroupOverview; + @Getter + private BackupInformationObject backupInformationObject; + public static TransactionManager transactionManager = null; + public static int batchSize = 2; // can be set with startDataGathering() + public static int threadNumber = 8; //#cores (#cpu's) for now + //private final Logger logger; + + + /** + * Constructor for the BackupManager + * @param transactionManager the transactionManager is required to execute queries + */ + public BackupManager( TransactionManager transactionManager ) { + BackupManager.transactionManager = transactionManager; + + informationPage = new InformationPage( "Backup Tasks" ); + informationPage.fullWidth(); + informationGroupOverview = new InformationGroup( informationPage, "Overview" ); + + // datagatherer.GatherEntries gatherEntries = new datagatherer.GatherEntries(); + //GatherEntries gatherEntries = new GatherEntries(); + + InformationManager im = InformationManager.getInstance(); + im.addPage( informationPage ); + im.addGroup( informationGroupOverview ); + + // start backup button + InformationText startBackup = new InformationText( informationGroupOverview, "Create the Backup." 
); + startBackup.setOrder( 1 ); + im.registerInformation( startBackup ); + + InformationAction startBackupAction = new InformationAction( informationGroupOverview, "Start", parameters -> { + //IndexManager.getInstance().resetCounters(); + startDataGathering( -1 ); + System.out.println( "gather" ); + return "Successfully started backup"; + } ); + startBackupAction.setOrder( 2 ); + im.registerInformation( startBackupAction ); + + // insert backup-data button + InformationText insertBackupData = new InformationText( informationGroupOverview, "Insert the Backup Data." ); + insertBackupData.setOrder( 3 ); + im.registerInformation( insertBackupData ); + + InformationAction insertBackupDataAction = new InformationAction( informationGroupOverview, "Insert", parameters -> { + //IndexManager.getInstance().resetCounters(); + startInserting(); + System.out.println( "hii" ); + return "Successfully inserted backup data"; + } ); + insertBackupDataAction.setOrder( 4 ); + im.registerInformation( insertBackupDataAction ); + + this.backupCrud = new BackupCrud( this ); + + } + + + /** + * Sets and returns the BackupManager instance + * @param backupManager The backupManager to be set + * @return the instance of the backupManager + */ + public static BackupManager setAndGetInstance( BackupManager backupManager ) { + if ( INSTANCE != null ) { + throw new GenericRuntimeException( "Setting the BackupInterface, when already set is not permitted." ); + } + INSTANCE = backupManager; + return INSTANCE; + } + + + /** + * Starts the data gathering process. It is responsible for starting and managing the schema and the entry data gathering. + * It starts the wrapping process of the schema data. + * It also starts the process of saving the data to a file (resp. it is started in the GatherEntries class) + * @param setBatchSize The batch size for the backup manager (stays that way also for the insertion) + */ + public void startDataGathering( int setBatchSize ) { + + //TODO: catch if it is set to an invalid value + // if setBatchSize is -1, the default value is used + if ( ! (setBatchSize == -1) ) { + batchSize = setBatchSize; + } + this.backupInformationObject = new BackupInformationObject(); + GatherSchema gatherSchema = new GatherSchema(); + + //gatherEntries.start(); + this.backupInformationObject = gatherSchema.start( backupInformationObject ); + wrapEntities(); + + // how/where do i safe the data + //gatherEntries.start(); + + + List> tablesForDataCollection = tableDataToBeGathered(); + List> collectionsForDataCollection = collectionDataToBeGathered(); + List graphNamespaceIds = collectGraphNamespaceIds(); + GatherEntries gatherEntries = new GatherEntries(transactionManager, tablesForDataCollection, collectionsForDataCollection, graphNamespaceIds); + gatherEntries.start(); + log.info( "finished all datagathering" ); + + + } + + + /** + * Wraps the entities with the BackupEntityWrapper. Each entity is wrapped indivually (but the wrapping itself is not happening here), and are brought back into the respecitve (map or list) form for the BackupInformationObject + * Part of the wrapped information is the entity itself, if the entity should be inserted, the name used in the insertion and the dependencies between the entites. + * Entities that are wrapped are: namespaces, tables, collections (to be done: views, indexes (document and relational), materialized views) + */ + private void wrapEntities() { + // 1. check for dependencies + // 2. 
wrap namespaces, tables, views, etc + + ImmutableMap> foreignKeysPerTable = backupInformationObject.getForeignKeysPerTable(); + Map> namespaceDependencies = new HashMap<>(); // key: namespaceId, value: referencedKeySchemaId + Map> tableDependencies = new HashMap<>(); // key: tableId, value: referencedKeyTableId + Map>> namespaceTableDependendencies = new HashMap<>(); // key: namespaceId, value: + Map> viewDependencies = new HashMap<>(); + //TODO(FF): are there dependencies for collections? (views/indexes from collections?) + + //go through all foreign keys, and check if the namespaceId equals the referencedKeySchemaId, and if not, add it to the namespaceDependencies map, with the namespaceId as key and the referencedKeySchemaId as value + for ( Map.Entry> entry : foreignKeysPerTable.entrySet() ) { + for ( LogicalForeignKey logicalForeignKey : entry.getValue() ) { + if ( logicalForeignKey.namespaceId != logicalForeignKey.referencedKeyNamespaceId ) { + + // Check for namespace dependencies + if ( namespaceDependencies.containsKey( logicalForeignKey.namespaceId ) ) { + List temp = namespaceDependencies.get( logicalForeignKey.namespaceId ); + //only add it if it isn't already in the list?? + temp.add( logicalForeignKey.referencedKeyNamespaceId ); + namespaceDependencies.put( logicalForeignKey.namespaceId, temp ); + } else { + List temp = new ArrayList<>(); + temp.add( logicalForeignKey.referencedKeyNamespaceId ); + namespaceDependencies.put( logicalForeignKey.namespaceId, temp ); + } + + // Check for table dependencies + if ( tableDependencies.containsKey( logicalForeignKey.entityId ) ) { + List temp = tableDependencies.get( logicalForeignKey.entityId ); + temp.add( logicalForeignKey.referencedKeyEntityId ); + tableDependencies.put( logicalForeignKey.entityId, temp ); + + List> temp2 = namespaceTableDependendencies.get( logicalForeignKey.namespaceId ); + temp2.add( new Pair<>( logicalForeignKey.referencedKeyNamespaceId, logicalForeignKey.referencedKeyEntityId ) ); + namespaceTableDependendencies.put( logicalForeignKey.namespaceId, temp2 ); + } else { + List temp = new ArrayList<>(); + temp.add( logicalForeignKey.referencedKeyEntityId ); + tableDependencies.put( logicalForeignKey.entityId, temp ); + + List> temp2 = new ArrayList<>(); + temp2.add( new Pair<>( logicalForeignKey.referencedKeyNamespaceId, logicalForeignKey.referencedKeyEntityId ) ); + namespaceTableDependendencies.put( logicalForeignKey.namespaceId, temp2 ); + } + + + } + } + } + + // wrap all namespaces with BackupEntityWrapper + ImmutableMap> wrappedNamespaces = backupInformationObject.wrapNamespaces( backupInformationObject.getNamespaces(), namespaceDependencies, tableDependencies, namespaceTableDependendencies); + backupInformationObject.setWrappedNamespaces( wrappedNamespaces ); + + // wrap all tables with BackupEntityWrapper + ImmutableMap>> wrappedTables = backupInformationObject.wrapLogicalEntities( backupInformationObject.getTables(), tableDependencies, namespaceTableDependendencies, true); + backupInformationObject.setWrappedTables( wrappedTables ); + + // wrap all collections with BackupEntityWrapper + ImmutableMap>> wrappedCollections = backupInformationObject.wrapLogicalEntities( backupInformationObject.getCollections(), null, namespaceTableDependendencies, true); + backupInformationObject.setWrappedCollections( wrappedCollections ); + + /* + ArrayList lol = new ArrayList<>(); + lol.add( (LogicalTable) backupInformationObject.getTables().get( 0 )); + + Map> lol2 = backupInformationObject.getTables(); + Map> lol3 = 
backupInformationObject.test2(lol2); + + + //ImmutableMap ha = backupInformationObject.test( lol ); + + */ + + // testing + DependencyManager dependencyManager = new DependencyManager(); + EntityReferencer entityReferencer = null; + List allTableReferencers = backupInformationObject.getAllTableReferencers(); + Map> test = new HashMap<>(); + if (entityReferencer != null) { + for ( EntityReferencer tableReferencer : allTableReferencers ) { + List lol = dependencyManager.getReferencedEntities(entityReferencer, allTableReferencers ); + test.put( tableReferencer.getEntityId(), lol ); + } + } + + + + } + + + /** + * Starts the inserting process. It is responsible for starting and managing the schema and the entry data inserting. + */ + public void startInserting() { + InsertSchema insertSchema = new InsertSchema( transactionManager ); + + if ( backupInformationObject != null ) { + insertSchema.start( backupInformationObject ); + } else { + log.info( "backupInformationObject is null" ); + } + + + InsertEntries insertEntries = new InsertEntries(transactionManager); + insertEntries.start(); + log.info( "inserting done" ); + } + + + /** + * Returns a list of all table names where the entry-data should be collected for the backup (right now, all of them, except sources) + * @return List of triples with the format: + */ + private List> tableDataToBeGathered() { + List> tableDataToBeGathered = new ArrayList<>(); + List relationalNamespaces = backupInformationObject.getRelNamespaces(); + + if (!relationalNamespaces.isEmpty()) { + for ( LogicalNamespace relationalNamespace : relationalNamespaces ) { + List tables = backupInformationObject.getTables().get( relationalNamespace.id ); + if(!tables.isEmpty() ) { + for ( LogicalEntity table : tables ) { + if (!(table.entityType.equals( EntityType.SOURCE ))) { + Triple triple = new Triple( relationalNamespace.id, relationalNamespace.name, table.name ); + tableDataToBeGathered.add( triple ); + } + } + } + } + } + + return tableDataToBeGathered; + } + + + /** + * Returns a list of triples with all collection names and their corresponding namespaceId where the entry-data should be collected for the backup (right now all of them) + * @return List of triples with the format: + */ + private List> collectionDataToBeGathered() { + List> collectionDataToBeGathered = new ArrayList<>(); + + for ( Map.Entry> entry : backupInformationObject.getCollections().entrySet() ) { + for ( LogicalEntity collection : entry.getValue() ) { + String nsName = getNamespaceName( entry.getKey() ); + collectionDataToBeGathered.add( new Triple<>( entry.getKey(), nsName, collection.name ) ); + } + } + return collectionDataToBeGathered; + } + + + /** + * Gets a list of all graph namespaceIds, which should be collected in the backup (right now all of them) + * @return List of all graph namespaceIds + */ + private List collectGraphNamespaceIds() { + List graphNamespaceIds = new ArrayList<>(); + for ( Map.Entry entry : backupInformationObject.getGraphs().entrySet() ) { + graphNamespaceIds.add( entry.getKey() ); + } + return graphNamespaceIds; + } + + + /** + * Gets the namespaceName for a given namespaceId + * @param nsId id of the namespace you want the name for + * @return the name of the namespace + */ + private String getNamespaceName (Long nsId) { + String namespaceName = backupInformationObject.getNamespaces().stream().filter( namespace -> namespace.id == nsId ).findFirst().get().name; + return namespaceName; + } + + + /** + * Gets the number of columns for a given table (via the table name) 
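+ * For illustration (hypothetical values): getNumberColumns( 0L, "album" ) would return the number of columns that were gathered for a table named "album" in the namespace with id 0.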
+ * @param nsId The id of the namespace where the table is located in + * @param tableName The name of the table you want to know the number of columns from + * @return The number of columns for the table + */ + public int getNumberColumns( Long nsId, String tableName ) { + //get the tableId for the given tableName + Long tableId = backupInformationObject.getTables().get( nsId ).stream().filter( table -> table.name.equals( tableName ) ).findFirst().get().id; + + // go through all columns in the backupinformationobject and see how many are associated with a table + int nbrCols = backupInformationObject.getColumns().get( tableId ).size(); + log.info( String.format( "nbr cols for table %s: %s", tableName, nbrCols )); + return nbrCols; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java new file mode 100644 index 0000000000..7c114c9b1f --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntries.java @@ -0,0 +1,200 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datagatherer; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.BufferedWriter; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Date; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.PolyImplementation; +import org.polypheny.db.ResultIterator; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.backup.datasaver.manifest.BackupManifestGenerator; +import org.polypheny.db.backup.datasaver.manifest.EntityInfo; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.languages.LanguageManager; +import org.polypheny.db.languages.QueryLanguage; +import org.polypheny.db.processing.ImplementationContext.ExecutedContext; +import org.polypheny.db.processing.QueryContext; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.type.entity.PolyValue; +import org.polypheny.db.util.PolyphenyHomeDirManager; +import org.polypheny.db.util.Triple; + +/** + * This 
class is responsible for gathering the entries from the database that should be saved in the backup. One thread is created for each entity, and each entity gets one file. + */ +@Slf4j +public class GatherEntries { + private final TransactionManager transactionManager; + private final List> tablesToBeCollected; + private final List> collectionsToBeCollected; + private final List graphNamespaceIds; + //private final int = hal.getProcessor().getPhysicalProcessorCount(); + + @Getter + private File backupFolder = null; + @Getter + private File dataFolder = null; + PolyphenyHomeDirManager homeDirManager = PolyphenyHomeDirManager.getInstance(); + + + /** + * Gathers entries from the database that should be saved in the backup, creates a thread for each entity (uses threadpool) + * @param transactionManager TransactionManager to use + * @param tablesToBeCollected List of tables that should be collected + * @param collectionsForDataCollection List of collections that should be collected + * @param graphNamespaceIds List of graph namespaces that should be collected + */ + public GatherEntries( TransactionManager transactionManager, List> tablesToBeCollected, List> collectionsForDataCollection, List graphNamespaceIds ) { + this.transactionManager = transactionManager; + this.tablesToBeCollected = tablesToBeCollected; + this.collectionsToBeCollected = collectionsForDataCollection; + this.graphNamespaceIds = graphNamespaceIds; + } + + // Move data around as little as possible -> use shortest possible path + // Stream and flush data + + /** + * Starts the gathering of the entries from the database that should be saved in the backup. The entries are gathered with select statemens and saved in files. + */ + public void start() { + ExecutorService executorService = null; + List entityInfoList = new ArrayList<>(); + try { + executorService = Executors.newFixedThreadPool( BackupManager.threadNumber ); + //initFileTest(); + //PolyphenyHomeDirManager homeDirManager = PolyphenyHomeDirManager.getInstance(); + backupFolder = homeDirManager.registerNewFolder( "backup" ); + dataFolder = homeDirManager.registerNewFolder( backupFolder, "data" ); + String dataFolderPath = "backup/data"; + + if ( !tablesToBeCollected.isEmpty() ) { + //go through each pair in tablesToBeCollectedList + for ( Triple table : tablesToBeCollected ) { + List filePaths = new ArrayList<>(); + //int nbrCols = BackupManager.getNumberColumns(table.getLeft(), table.getRight()); + int nbrCols = BackupManager.getINSTANCE().getNumberColumns( table.getLeft(), table.getRight() ); + //TODO(FF): exclude default columns? 
no, how do you differentiate for each line if it is not a default value + String query = String.format( "SELECT * FROM %s.%s", table.getMiddle(), table.getRight() ); + //executeQuery2( query, DataModel.RELATIONAL, Catalog.defaultNamespaceId ); + + String fileName = String.format( "tab_%s_%s.txt", table.getMiddle(), table.getRight() ); + File tableData = homeDirManager.registerNewFile( getDataFolder(), fileName ); + filePaths.add( String.format( "%s/%s", dataFolderPath, fileName ) ); + EntityInfo entityInfo = new EntityInfo( filePaths, table.getRight(), table.getMiddle(), table.getLeft(), DataModel.RELATIONAL, nbrCols ); + entityInfoList.add( entityInfo ); + executorService.submit( new GatherEntriesTask( transactionManager, query, DataModel.RELATIONAL, Catalog.defaultNamespaceId, tableData ) ); + } + /* + for ( String nsTableName : tablesToBeCollected ) { + String query = "SELECT * FROM " + nsTableName; + executeQuery( query, DataModel.RELATIONAL, Catalog.defaultNamespaceId ); + } */ + } + + if ( !collectionsToBeCollected.isEmpty() ) { + for ( Triple collection : collectionsToBeCollected ) { + List filePaths = new ArrayList<>(); + String query = String.format( "db.%s.find()", collection.getRight() ); + //executeQuery2( query, DataModel.DOCUMENT, collection.getKey() ); + + String fileName = String.format( "col_%s.txt", collection.getRight() ); + File collectionData = homeDirManager.registerNewFile( getDataFolder(), fileName ); + filePaths.add( String.format( "%s/%s", dataFolderPath, fileName ) ); + EntityInfo entityInfo = new EntityInfo( filePaths, collection.getRight(), collection.getMiddle(), collection.getLeft(), DataModel.DOCUMENT ); + entityInfoList.add( entityInfo ); + executorService.submit( new GatherEntriesTask( transactionManager, query, DataModel.DOCUMENT, collection.getLeft(), collectionData ) ); + } + } + + if ( !graphNamespaceIds.isEmpty() ) { + for ( Long graphNamespaceId : graphNamespaceIds ) { + List filePaths = new ArrayList<>(); + String query = "MATCH (*) RETURN n"; + //executeQuery2( query, DataModel.GRAPH, graphNamespaceId ); + String nsName = Catalog.snapshot().getNamespace( graphNamespaceId ).orElseThrow().name; + + String fileName = String.format( "graph_%s.txt", graphNamespaceId.toString() ); + File graphData = homeDirManager.registerNewFile( getDataFolder(), fileName ); + filePaths.add( String.format( "%s/%s", dataFolderPath, fileName ) ); + EntityInfo entityInfo = new EntityInfo( filePaths, nsName, nsName, graphNamespaceId, DataModel.GRAPH ); + entityInfoList.add( entityInfo ); + executorService.submit( new GatherEntriesTask( transactionManager, query, DataModel.GRAPH, graphNamespaceId, graphData ) ); + } + } + + log.info( "collected entry data" ); + //initializeFileLocation(); + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.MINUTES); + log.info( "executor service was shut down" ); + + } catch ( Exception e ) { + throw new GenericRuntimeException( "An error occured during threadpooling to collect the data: " + e.getMessage() ); + } + /* + finally { + if ( Objects.nonNull( executorService ) && !executorService.isTerminated() ) { + log.error( "cancelling all non-finished tasks" ); + } + if ( Objects.nonNull( executorService ) ) { + //executorService.shutdownNow(); + log.info( "shutdown finished" ); + } + } + + */ + + try { + Calendar calendar = Calendar.getInstance(); + Date currentDate = calendar.getTime(); + //TODO(FF): calculate checksum + File manifestFile = homeDirManager.registerNewFile( getBackupFolder(), "manifest.txt" ); + 
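// The manifest lists, for every gathered entity, the written file paths, the entity and namespace names, the data model and, for tables, the column count, so that the insertion step can locate the files again; the second argument is presumably the checksum, which is still a TODO and passed as an empty string. + 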
BackupManifestGenerator.generateManifest( entityInfoList, "", manifestFile, currentDate ); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Could not create manifest for backup" + e.getMessage() ); + } + + } +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntriesTask.java b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntriesTask.java new file mode 100644 index 0000000000..053bb8193b --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherEntriesTask.java @@ -0,0 +1,271 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datagatherer; + +import java.io.BufferedOutputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.PolyImplementation; +import org.polypheny.db.ResultIterator; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.backup.datasaver.BackupFileWriter; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.languages.LanguageManager; +import org.polypheny.db.languages.QueryLanguage; +import org.polypheny.db.processing.ImplementationContext.ExecutedContext; +import org.polypheny.db.processing.QueryContext; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.type.entity.PolyValue; +import org.polypheny.db.type.entity.graph.PolyGraph; + +/** + * This class is a task that is executed by a thread. It gathers the entries from the database from one entity and writes them to a file. + */ +@Slf4j +public class GatherEntriesTask implements Runnable { + + private TransactionManager transactionManager; + private String query; + private DataModel dataModel; + private long namespaceId; + private File dataFile; + + + /** + * Creates a new GatherEntriesTask, which gathers the entries from the database from one entity and writes them to a file (one file per entity) + * @param transactionManager TransactionManager to use + * @param query gather query to execute + * @param dataModel DataModel of the entity where the entry data belongs to + * @param namespaceId Id of the namespace of the entity + * @param dataFile File to write the entries to + */ + public GatherEntriesTask( TransactionManager transactionManager, String query, DataModel dataModel, long namespaceId, File dataFile ) { + this.transactionManager = transactionManager; //TODO(FF): is transactionmanager thread safe to pass it like this?? 
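+ // Note: each task starts and commits its own transaction in run(); only the TransactionManager itself is shared across the worker threads.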
+ this.query = query; + this.dataModel = dataModel; + this.namespaceId = namespaceId; + this.dataFile = dataFile; + } + + + /** + * Runs the task, gathers the entries from the database from one entity and writes them to a file + */ + @Override + public void run() { + log.info( "thread for gather entries entered with query" + query ); + Transaction transaction; + Statement statement = null; + PolyImplementation result; + + switch ( dataModel ) { + case RELATIONAL: + try ( + //DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), 32768)); + PrintWriter pOut = new PrintWriter( new DataOutputStream( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ) ); + //BufferedWriter bOut = new BufferedWriter( new OutputStreamWriter( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ) ); + //DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(dataFile))); + //String result = in.readUTF(); + //in.close(); + + ) { + BackupFileWriter out = new BackupFileWriter( dataFile ); + + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + log.warn( "Batch size gather: " + BackupManager.batchSize ); + //TODO(FF): be aware for writing into file with batches that you dont overwrite the entries already in the file (evtl you need to read the whole file again + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).batch( BackupManager.batchSize ).namespaceId( namespaceId ).transactions( new ArrayList<>( List.of( transaction ) ) ).statement( statement ).build() ).get( 0 ); + //ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "sql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( Catalog.defaultNamespaceId ).build(), statement ).get( 0 ); + // in case of results + ResultIterator iter = executedQuery.getIterator(); + + //batch repeats here + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> resultsPerTable = iter.getNextBatch(); + log.info( resultsPerTable.toString() ); + //FIXME(FF): if this is array: [[1, PolyList(value=[PolyList(value=[PolyList(value=[PolyBigDecimal(value=111), PolyBigDecimal(value=112)]), PolyList(value=[PolyBigDecimal(value=121), PolyBigDecimal(value=122)])]), PolyList(value=[PolyList(value=[PolyBigDecimal(value=211), PolyBigDecimal(value=212)]), PolyList(value=[PolyBigDecimal(value=221), PolyBigDecimal(value=222)])])])]] + //value is shown correctly for tojson + + for ( List row : resultsPerTable ) { + for ( PolyValue polyValue : row ) { + String byteString = polyValue.serialize(); + byte[] byteBytes = polyValue.serialize().getBytes( StandardCharsets.UTF_8 ); + String jsonString = polyValue.toTypedJson(); + + //out.write( byteBytes ); + //out.write( byteString.getBytes( StandardCharsets.UTF_8 ) ); + //out.writeChars( jsonString ); + //pOut.println( jsonString ); + out.write( jsonString ); + //out.write( byteString ); + out.newLine(); + + //larger, testing easier, replace later + PolyValue deserialized = PolyValue.deserialize( byteString ); + PolyValue deserialized2 = PolyValue.fromTypedJson( jsonString, PolyValue.class ); + } + } + + } + out.flush(); + out.close(); + transaction.commit(); + + 
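// Each PolyValue was written on its own line as typed JSON; the serialize()/deserialize() calls above only round-trip the value for verification and, per the comment, are meant to be replaced later. + 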
} catch ( Exception e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } + + break; + + case DOCUMENT: + try ( + //DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), 32768)); + PrintWriter pOut = new PrintWriter( new DataOutputStream( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ) ); + //BufferedWriter bOut = new BufferedWriter( new OutputStreamWriter( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ) ); + ) { + BackupFileWriter out = new BackupFileWriter( dataFile ); + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "mql" ) ) + .query( query ).origin( "Backup Manager" ) + .transactionManager( transactionManager ) + .batch( BackupManager.batchSize ) + .namespaceId( namespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + //ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "mql" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // list with tuples + List> resultsPerCollection = iter.getNextBatch(); + for ( List entry : resultsPerCollection ) { + for ( PolyValue polyValue : entry ) { + String byteString = polyValue.serialize(); + byte[] byteBytes = polyValue.serialize().getBytes( StandardCharsets.UTF_8 ); + String jsonString = polyValue.toTypedJson(); + + //out.write( byteBytes ); + //out.write( byteString.getBytes( StandardCharsets.UTF_8 ) ); + //pOut.println( jsonString); + out.write( jsonString ); + //bOut.write( byteString ); + out.newLine(); + //out.writeChars( jsonString ); + } + } + + //out.writeChars( resultsPerCollection.toString() ); + log.info( resultsPerCollection.toString() ); + } + out.flush(); + out.close(); + log.info( "end of thread reached: case document" ); + transaction.commit(); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } + break; + + case GRAPH: + try ( + DataOutputStream ouuut = new DataOutputStream( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ); + //BufferedWriter bOut = new BufferedWriter( new OutputStreamWriter( new BufferedOutputStream( new FileOutputStream( dataFile ), 32768 ) ) ); + ) { + BackupFileWriter out = new BackupFileWriter( dataFile ); + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Gatherer" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "cypher" ) ) + .query( query ) + .origin( "Backup Manager" ) + .transactionManager( transactionManager ) + .batch( 
BackupManager.batchSize ) + .namespaceId( namespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + //ExecutedContext executedQuery1 = LanguageManager.getINSTANCE().anyQuery( QueryContext.builder().language( QueryLanguage.from( "cypher" ) ).query( query ).origin( "Backup Manager" ).transactionManager( transactionManager ).namespaceId( namespaceId ).build(), statement ).get( 0 ); + + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + List> graphPerNamespace = iter.getNextBatch(); + for ( List entry : graphPerNamespace ) { + for ( PolyValue polyValue : entry ) { + + String byteString = polyValue.serialize(); + byte[] byteBytes = polyValue.serialize().getBytes( StandardCharsets.UTF_8 ); + String jsonString = polyValue.toTypedJson(); + + //out.write( byteBytes ); + //out.write( byteString.getBytes( StandardCharsets.UTF_8 ) ); + //pOut.println( jsonString); + out.write( jsonString ); + //bOut.write( byteString ); + out.newLine(); + //out.writeChars( jsonString ); + } + } + log.info( graphPerNamespace.toString() ); + } + out.flush(); + out.close(); + log.info( "end of thread reached: case graph" ); + transaction.commit(); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while collecting entries: " + e.getMessage() ); + } + break; + + default: + throw new GenericRuntimeException( "Backup - GatherEntries: DataModel not supported" ); + } + log.info( "end of thread reached - completely done" ); + statement.close(); + + } + + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherSchema.java b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherSchema.java new file mode 100644 index 0000000000..de5a693076 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datagatherer/GatherSchema.java @@ -0,0 +1,314 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datagatherer; + +import com.google.common.collect.ImmutableMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.backup.BackupInformationObject; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.IdBuilder; +import org.polypheny.db.catalog.entity.LogicalConstraint; +import org.polypheny.db.catalog.entity.logical.LogicalCollection; +import org.polypheny.db.catalog.entity.logical.LogicalColumn; +import org.polypheny.db.catalog.entity.logical.LogicalEntity; +import org.polypheny.db.catalog.entity.logical.LogicalForeignKey; +import org.polypheny.db.catalog.entity.logical.LogicalGraph; +import org.polypheny.db.catalog.entity.logical.LogicalIndex; +import org.polypheny.db.catalog.entity.logical.LogicalMaterializedView; +import org.polypheny.db.catalog.entity.logical.LogicalNamespace; +import org.polypheny.db.catalog.entity.logical.LogicalPrimaryKey; +import org.polypheny.db.catalog.entity.logical.LogicalTable; +import org.polypheny.db.catalog.entity.logical.LogicalView; +import org.polypheny.db.catalog.impl.PolyCatalog; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.catalog.snapshot.Snapshot; + + +/** + * This class gathers the schema information from Polypheny-DB, and safes it in a BackupInformationObject + */ +@Slf4j +public class GatherSchema { + + //gather the schemas from Polypheny-DB + private final IdBuilder idBuilder = IdBuilder.getInstance(); + private Snapshot snapshot; + private BackupInformationObject backupInformationObject; + + private Catalog catalog = PolyCatalog.getInstance(); + + //don't safe data here, but safe it informationobject... + //ImmutableMap namespaces; + List namespaces; + List relNamespaces; + List graphNamespaces; + List docNamespaces; + + //namespace id, list of tables for the namespace + ImmutableMap> tables; + //TODO(FF): make views and materialized views + ImmutableMap> views; + ImmutableMap> materializedViews; + ImmutableMap> collections; + ImmutableMap graphs; + + //table id, list of views for the table + ImmutableMap> columns; + ImmutableMap> primaryKeysPerTable; + ImmutableMap> foreignKeysPerTable; + + // index -> can only be created per (one) table + ImmutableMap> logicalIndexes; + ImmutableMap> constraints; + + Boolean collectedRel = false; + Boolean collectedDoc = false; + Boolean collectedGraph = false; + + + public GatherSchema() { + } + + + /** + * Starts the gathering of the schema information from Polypheny-DB + * @param backupInformationObject The BackupInformationObject where the gathered information should be saved + * @return The BackupInformationObject with the gathered schema information + */ + public BackupInformationObject start( BackupInformationObject backupInformationObject ) { + log.debug( "gather schemas" ); + this.backupInformationObject = backupInformationObject; + + //figure out how to get the snapshot from catalog bzw. 
how to create a new snapshot, and take infos out of it + getSnapshot(); + getRelSchema(); + getDocSchema(); + getGraphSchema(); + testPrint(); + return backupInformationObject; + } + + + /** + * Gets the snapshot from the catalog, and safes it in the class variable snapshot + * Also safes list of all namespaces + */ + private void getSnapshot() { + + this.snapshot = catalog.getSnapshot(); + int nbrNamespaces = snapshot.getNamespaces( null ).size(); + int publicTables = snapshot.rel().getTablesFromNamespace( 0 ).size(); + + //this.namespaces = ImmutableMap.copyOf( namespaces ); + this.namespaces = snapshot.getNamespaces( null ); + this.backupInformationObject.setNamespaces( namespaces ); + + log.debug( "# namespaces = " + nbrNamespaces ); + log.debug( "# tables from public = " + publicTables ); + } + + + /** + * Gets the tables, views, columns, keys, indexes, constraints and nodes from the snapshot + */ + private void getRelSchema() { + // TODO(FF): differentiate between views and materialized views (safe them seperately) + Map> tables = new HashMap<>(); + Map> views = new HashMap<>(); + Map> materializedViews = new HashMap<>(); + Map> columns = new HashMap<>(); + Map> constraints = new HashMap<>(); + //Map> keysPerTable = new HashMap<>(); + Map> primaryKeysPerTable = new HashMap<>(); + Map> foreignKeysPerTable = new HashMap<>(); + Map> logicalIndex = new HashMap<>(); + //List getConnectedViews( long id ); + + List relNamespaces = namespaces.stream().filter( n -> n.dataModel == DataModel.RELATIONAL ).collect( Collectors.toList() ); + this.relNamespaces = relNamespaces; + this.backupInformationObject.setRelNamespaces( relNamespaces ); + + // go through the list of namespaces and get the id of each namespace, map the tables to the namespace id + //TODO(FF)?: views - list is just empty, but creates it nontheless, same for constraints, keys + for ( LogicalNamespace namespace : relNamespaces ) { + Long namespaceId = namespace.getId(); + + // get tables from namespace + List tablesFromNamespace = snapshot.rel().getTablesFromNamespace( namespaceId ); + //List tablesFromNamespace = snapshot.rel().getTables( namespaceId, null ).stream().map( v -> v.unwrap( LogicalEntity.class ) ).collect(Collectors.toList( )); + tables.put( namespaceId, tablesFromNamespace ); + + // get other schema information for each table + for ( LogicalTable table : tablesFromNamespace ) { + Long tableId = table.getId(); + + //views + List connectedViews = snapshot.rel().getConnectedViews( tableId ); + //TODO(FF): see if this actually works... (does it seperate correctly?) (views not handles yet correctly in snapshot) + //get all materialized views from the list of views and materialized views + List connMatView = connectedViews.stream().filter( v -> v instanceof LogicalMaterializedView ).map( v -> (LogicalMaterializedView) v ).collect( Collectors.toList() ); + //get all views from the list of views and materialized views + List connView = connectedViews.stream().filter( v -> v instanceof LogicalView ).map( v -> v ).collect( Collectors.toList() ); + //safes the views and materialized views in the maps + views.put( tableId, connView ); + materializedViews.put( tableId, connMatView ); + + //cols + List tableColumns = snapshot.rel().getColumns( tableId ); + columns.put( tableId, tableColumns ); + + //keys - (old: all keys selected: incl. 
pk, fk, constr, indexes + //snapshot.rel().getKeys(); - get all keys + //List tableKeys = snapshot.rel().getTableKeys( tableId ); + //keysPerTable.put( tableId, tableKeys ); + + //primary keys (for the table) + List pkk = snapshot.rel().getPrimaryKeys().stream().filter( k -> k.entityId == tableId ).collect( Collectors.toList() ); + primaryKeysPerTable.put( tableId, pkk ); + + // foreign keys + List fk = snapshot.rel().getForeignKeys( tableId ); + foreignKeysPerTable.put( tableId, fk ); + /* + LogicalForeignKey( + name=fk_students_album, + referencedKeyId=0, + referencedKeySchemaId=0, + referencedKeyTableId=5, + updateRule=RESTRICT, + deleteRule=RESTRICT, + referencedKeyColumnIds=[26]) + */ + + //indexes + List logicalIdx = snapshot.rel().getIndexes( tableId, false ); + logicalIndex.put( tableId, logicalIdx ); + + // get list of constraints for each table + List tableConstraints = snapshot.rel().getConstraints( tableId ); + constraints.put( tableId, tableConstraints ); + + + } + + } + + //safes the gathered information in the class variables + //this.tables = ImmutableMap.copyOf( tables.entrySet().stream().collect(Collectors.toMap(v -> v.getKey(), v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ) ).collect(Collectors.toList() ) ))); + //this.tables = (ImmutableMap>) ImmutableMap.copyOf( (Map>) tables.entrySet().stream().collect(Collectors.toMap(v -> v.getKey(), v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ) ).collect(Collectors.toList() ) )) ); + this.tables = ImmutableMap.copyOf( tables.entrySet().stream().collect( Collectors.toMap( Entry::getKey, v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ).orElseThrow() ).collect( Collectors.toList() ) ) ) ); + //this.tables = ImmutableMap.copyOf( (Map>) tables ); + //this.tables = ImmutableMap.copyOf( (Map>) tables ); + this.backupInformationObject.setTables( this.tables ); + this.views = ImmutableMap.copyOf( views.entrySet().stream().collect( Collectors.toMap( Entry::getKey, v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ).orElseThrow() ).collect( Collectors.toList() ) ) ) ); + this.backupInformationObject.setViews( this.views ); + this.columns = ImmutableMap.copyOf( columns ); + this.backupInformationObject.setColumns( this.columns ); + this.constraints = ImmutableMap.copyOf( constraints ); + this.backupInformationObject.setConstraints( this.constraints ); + //this.keysPerTable = ImmutableMap.copyOf( keysPerTable ); + this.primaryKeysPerTable = ImmutableMap.copyOf( primaryKeysPerTable ); + this.backupInformationObject.setPrimaryKeysPerTable( this.primaryKeysPerTable ); + this.foreignKeysPerTable = ImmutableMap.copyOf( foreignKeysPerTable ); + this.backupInformationObject.setForeignKeysPerTable( this.foreignKeysPerTable ); + //this.logicalIndexes = ImmutableMap.copyOf( logicalIndex.entrySet().stream().collect(Collectors.toMap(v -> v.getKey(), v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ) ).collect(Collectors.toList() ) )) ); + //this.logicalIndexes = ImmutableMap.copyOf( logicalIndex.entrySet().stream().collect( Collectors.toMap(v -> v.getKey(), v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ) )) ) ) + //TODO(FF): unwrap doesn't work for indexes + this.backupInformationObject.setLogicalIndexes( this.logicalIndexes ); + this.materializedViews = ImmutableMap.copyOf( materializedViews.entrySet().stream().collect( Collectors.toMap( Entry::getKey, v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ).orElseThrow() 
).collect( Collectors.toList() ) ) ) ); + this.backupInformationObject.setMaterializedViews( this.materializedViews ); + + this.backupInformationObject.setCollectedRelSchema( true ); + + } + + + /** + * Gets the Graph schema from the snapshot, and safes it in class variables + */ + private void getGraphSchema() { + + List graphNamespaces = namespaces.stream().filter( n -> n.dataModel == DataModel.GRAPH ).collect( Collectors.toList() ); + this.graphNamespaces = graphNamespaces; + this.backupInformationObject.setGraphNamespaces( graphNamespaces ); + + List graphsFromNamespace = snapshot.graph().getGraphs( null ); + + //TODO(FF): can there only be one graph per namespace? + Map nsGraphs = new HashMap<>(); + + //for each graph get the namespaceId, see if it matches with the current namespace, and map the graph to the namespaceid + for ( LogicalGraph graph : graphsFromNamespace ) { + Long graphNsId = graph.getNamespaceId(); + + // map the namespaceId to the graph + nsGraphs.put( graphNsId, graph ); + } + + //safes the gathered information in the class variables + this.graphs = ImmutableMap.copyOf( nsGraphs ); + this.backupInformationObject.setGraphs( this.graphs ); + this.backupInformationObject.setCollectedGraphSchema( true ); + + } + + + /** + * Gets the Doc schema from the snapshot, and safes it in class variables + */ + private void getDocSchema() { + + Map> nsCollections = new HashMap<>(); + List docNamespaces = namespaces.stream().filter( n -> n.dataModel == DataModel.DOCUMENT ).collect( Collectors.toList() ); + this.docNamespaces = docNamespaces; + this.backupInformationObject.setDocNamespaces( docNamespaces ); + + for ( LogicalNamespace namespace : docNamespaces ) { + Long namespaceId = namespace.getId(); + + // get collections per namespace + List collectionsFromNamespace = snapshot.doc().getCollections( namespaceId, null ); + nsCollections.put( namespaceId, collectionsFromNamespace ); + } + + //safes the gathered information in the class variables + this.collections = ImmutableMap.copyOf( nsCollections.entrySet().stream().collect( Collectors.toMap( Entry::getKey, v -> v.getValue().stream().map( e -> e.unwrap( LogicalEntity.class ).orElseThrow() ).collect( Collectors.toList() ) ) ) ); + this.backupInformationObject.setCollections( this.collections ); + this.backupInformationObject.setCollectedDocSchema( true ); + } + + /** + * Prints some of the gathered information (in a debug statement) + */ + private void testPrint() { + log.debug( "============================================= test print ==============================================" ); + log.debug( "namespaces: " + namespaces.toString() ); + log.debug( "tables: " + tables.toString() ); + log.debug( "views: " + views.toString() ); + log.debug( "columns: " + columns.toString() ); + log.debug( "constraints: " + constraints.toString() ); + log.debug( "primarykeysPerTable: " + primaryKeysPerTable.toString() ); + log.debug( "foreignkeysPerTable: " + foreignKeysPerTable.toString() ); + log.debug( "============================================= end print ==============================================" ); + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntries.java b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntries.java new file mode 100644 index 0000000000..5e7e3548e7 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntries.java @@ -0,0 +1,137 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datainserter; + +import java.io.File; +import java.nio.file.Path; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.backup.datasaver.manifest.BackupManifest; +import org.polypheny.db.backup.datasaver.manifest.EntityInfo; +import org.polypheny.db.backup.datasaver.manifest.ManifestReader; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.util.PolyphenyHomeDirManager; + +@Slf4j +public class InsertEntries { + Optional backupFile = null; + TransactionManager transactionManager = null; + + + /** + * Organizes the insertion of data from the backup + * @param transactionManager TransactionManager to use + */ + public InsertEntries(TransactionManager transactionManager) { + this.transactionManager = transactionManager; + } + + + /** + * Starts the insertion of data from the backup, creates a threadpool where each thread handles one file, i.e. one entity + */ + public void start() { + ExecutorService executorService = null; + try { + executorService = Executors.newFixedThreadPool( BackupManager.threadNumber ); + PolyphenyHomeDirManager homeDirManager = PolyphenyHomeDirManager.getInstance(); + + this.backupFile = homeDirManager.getHomeFile( "backup" ); + Optional manifestFile = homeDirManager.getHomeFile( "backup/manifest.txt" ); + BackupManifest manifest = new ManifestReader().readManifest( manifestFile.get().getPath() ); + + File[] files = backupFile.get().listFiles(); + + for ( EntityInfo entityInfo : manifest.getEntityInfos() ) { + for ( String path : entityInfo.getFilePaths()) { + //TODO(FF): check if file is there from path, if not, skip it and move to next file... 
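+ // The file paths stored in the manifest are relative to the Polypheny home directory and are resolved through PolyphenyHomeDirManager.getHomeFile(); files that are listed in the manifest but missing on disk only produce a warning below.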
+ //File filee = new File( path.toString() ); + //Path filePath = filee.toPath(); + //File file = filePath.toFile(); + File file = homeDirManager.getHomeFile( path ).get(); + log.info( path ); + if ( file.isFile() && file.exists() ) { + long nsId = Catalog.snapshot().getNamespace( entityInfo.getNamespaceName() ).orElseThrow().id; + log.info( "insertEntries - file exists: " + file.getPath() ); + //TransactionManager transactionManager, File dataFile, DataModel dataModel, Long namespaceId, String namespaceName, String tableName, int nbrCols + executorService.submit( new InsertEntriesTask( transactionManager, file, entityInfo.getDataModel(), nsId, entityInfo.getNamespaceName(), entityInfo.getEntityName(), entityInfo.getNbrCols() ) ); + } else { + log.warn( "Insert Entries for Backup: " + path + " does not exist, but is listed in manifest" ); + } + + } + } + + /* + + if ( backupFile != null ) { + for ( File file : files ) { + if ( file.isDirectory() ) { + log.info( "insertEntries: " + file.getPath() ); + File[] subFiles = file.listFiles(); + for ( File subFile : subFiles ) { + if ( subFile.isFile() ) { + executorService.submit( new InsertEntriesTask( subFile, DataModel.RELATIONAL, "reli", "album" ) ); + } + } + } + + File dataFolder = homeDirManager.getFileIfExists( "backup/data" ); + File[] dataFiles = dataFolder.listFiles(); + for ( File dataFile : dataFiles ) { //i can go through file... (or check if file is file, bcs if it is folder, subfiles are listed + executorService.submit( new InsertEntriesTask( dataFile ) ); + } + + */ + /* + if ( file.isFile() ) { + executorService.submit( new InsertEntriesTask( file ) ); + } + } + } + */ + + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.MINUTES); + log.info( "executor service was shut down" ); + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error with threadpool, datagathering: " + e.getMessage() ); + } + /* + finally { + if ( Objects.nonNull( executorService ) && !executorService.isTerminated() ) { + log.error( "cancelling all non-finished tasks" ); + } + if ( Objects.nonNull( executorService ) ) { + //executorService.shutdownNow(); + log.info( "shutdown finished" ); + } + } + + */ + + + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntriesTask.java b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntriesTask.java new file mode 100644 index 0000000000..2dc6276140 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertEntriesTask.java @@ -0,0 +1,474 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datainserter; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Collectors; +import lombok.NonNull; +import lombok.extern.slf4j.Slf4j; +import org.jetbrains.annotations.NotNull; +import org.polypheny.db.PolyImplementation; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.backup.datasaver.BackupFileReader; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.entity.logical.LogicalNamespace; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.languages.LanguageManager; +import org.polypheny.db.languages.QueryLanguage; +import org.polypheny.db.processing.ImplementationContext.ExecutedContext; +import org.polypheny.db.processing.QueryContext; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.type.PolyType; +import org.polypheny.db.type.entity.PolyList; +import org.polypheny.db.type.entity.PolyString; +import org.polypheny.db.type.entity.PolyValue; +import org.polypheny.db.type.entity.graph.PolyDictionary; +import org.polypheny.db.type.entity.graph.PolyEdge; +import org.polypheny.db.type.entity.graph.PolyGraph; +import org.polypheny.db.type.entity.graph.PolyNode; +import org.polypheny.db.type.entity.relational.PolyMap; + +@Slf4j +public class InsertEntriesTask implements Runnable{ + TransactionManager transactionManager; + File dataFile; + DataModel dataModel; + Long namespaceId; + String namespaceName; + String entityName; + int nbrCols; + + + /** + * Executes a task that inserts entries into the database. 
One task is created per entity to be inserted; each task handles exactly one entity.
+     * The data is inserted batch-wise, meaning that a new insert query is created for each batch of entries.
+     * @param transactionManager TransactionManager
+     * @param dataFile File that contains the entry data
+     * @param dataModel DataModel of the data to be inserted
+     * @param namespaceId Id of the namespace of the entity where the entries should be inserted
+     * @param namespaceName Name of the namespace of the entity where the entries should be inserted
+     * @param entityName Name of the entity where the entries should be inserted
+     * @param nbrCols Number of columns of the entity (for tables; ignored for other data models)
+     */
+    public InsertEntriesTask( TransactionManager transactionManager, File dataFile, DataModel dataModel, Long namespaceId, String namespaceName, String entityName, int nbrCols ) {
+        this.transactionManager = transactionManager;
+        this.dataFile = dataFile;
+        this.dataModel = dataModel;
+        this.namespaceId = namespaceId;
+        this.namespaceName = namespaceName;
+        this.entityName = entityName;
+        this.nbrCols = nbrCols;
+    }
+
+
+    /**
+     * Reads the data from the file and inserts it into the database
+     */
+    @Override
+    public void run() {
+        Transaction transaction;
+        Statement statement;
+        PolyImplementation result;
+
+        try (
+                DataInputStream iin = new DataInputStream( new BufferedInputStream( new FileInputStream( dataFile ), 32768 ) );
+                //BufferedReader bIn = new BufferedReader( new InputStreamReader( new BufferedInputStream( new FileInputStream( dataFile ), 32768 ) ) );
+        )
+        {
+            BackupFileReader in = new BackupFileReader( dataFile );
+            int elementCounter = 0;
+            int batchCounter = 0;
+            String query = "";
+
+            switch ( dataModel ) {
+                case RELATIONAL:
+                    String inLine = "";
+                    String row = "";
+                    transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Inserter" ); //FIXME: would need one transaction per batch, resp. commit as soon as a batch is done; currently the commit only happens at the very end
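+                    // For illustration (namespace, table and values are hypothetical): with nbrCols = 2
+                    // and a batch size of 3, the loop below assembles one statement per batch of the shape
+                    //   INSERT INTO mynamespace.album VALUES ('Help!', 1), ('Abbey Road', 2), ('Let It Be', 3)
+                    // Values whose PolyType is a character or date/time type are wrapped in single quotes,
+                    // and single quotes inside a value are escaped by doubling them.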
+ String relValues = ""; + + //build up row for query (since each value is one row in the file), and then execute query for each row + while ( (inLine = in.readLine()) != null ) { + elementCounter++; + PolyValue deserialized = PolyValue.fromTypedJson( inLine, PolyValue.class ); + + String value = deserialized.toJson(); + value = value.replaceAll( "'", "''" ); + if (PolyType.CHAR_TYPES.contains( deserialized.getType() ) || PolyType.DATETIME_TYPES.contains( deserialized.getType() ) ) { + value = String.format( "'%s'", value ); + } else { + value = String.format( "%s", value ); + } + row += value + ", "; + + if (elementCounter == nbrCols) { + row = row.substring( 0, row.length() - 2 ); // remove last ", " + //query = String.format( "INSERT INTO %s.%s VALUES (%s)", namespaceName, entityName, row ); + //log.info( row ); + + //relValues = relValues + row + ", "; + row = String.format( "(%s), ", row ); + relValues = relValues + row; + //log.info( relValues ); + elementCounter = 0; + batchCounter ++; + //query = ""; + row= ""; + } + + if (batchCounter == BackupManager.batchSize) { + //log.info( "in batchcounter: " + relValues ); + relValues = relValues.substring( 0, relValues.length() - 2 ); // remove last ", " + query = String.format( "INSERT INTO %s.%s VALUES %s", namespaceName, entityName, relValues ); + + log.info( query ); + try { + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "sql" ) ) + .query( query ).origin( "Backup Manager" ) + .transactionManager( transactionManager ) + //.statement( statement ) + .namespaceId( namespaceId ) + .build() + .addTransaction( transaction ) ).get( 0 ); + } catch ( Exception e) { + throw new GenericRuntimeException("Could not insert relational backup data from query: " + query + " with error message:" + e.getMessage()); + } + + batchCounter = 0; + relValues = ""; + } + + } + if ( batchCounter != 0 ) { + //execute the statement with the remaining values + relValues = relValues.substring( 0, relValues.length() - 2 ); // remove last ", " + query = String.format( "INSERT INTO %s.%s VALUES %s", namespaceName, entityName, relValues ); + + log.info( "rest: " + query ); + try { + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "sql" ) ) + .query( query ) + .origin( "Backup Manager" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + //.statement( statement ) + .build().addTransaction( transaction ) ).get( 0 ); + } catch ( Exception e ) { + throw new GenericRuntimeException("Could not insert relational backup data from query: " + query + " with error message:" + e.getMessage()); + + } + batchCounter = 0; + query = ""; + } + transaction.commit(); + batchCounter = 0; + break; + case DOCUMENT: + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Inserter" ); + //statement = transaction.createStatement(); + String docValues = ""; + while ( (inLine = in.readLine()) != null ) { + PolyValue deserialized = PolyValue.fromTypedJson( inLine, PolyValue.class ); + String value = deserialized.toJson(); + docValues += value + ", "; + batchCounter++; + //query = String.format( "db.%s.insertOne(%s)", entityName, value ); + + if (batchCounter == BackupManager.batchSize) { + // remove the last ", " from the string + //statement = transaction.createStatement(); + docValues = docValues.substring( 0, docValues.length() - 2 ); + + query = String.format( 
"db.%s.insertMany([%s])", entityName, docValues ); + log.info( query ); + try { + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "mql" ) ) + .query( query ).origin( "Backup Manager" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + //.statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + } catch ( Exception e ) { + throw new GenericRuntimeException("Could not insert document backup data from query: " + query + " with error message:" + e.getMessage()); + + } + + batchCounter = 0; + docValues = ""; + query = ""; + } + + } + + if (batchCounter != 0) { + //statement = transaction.createStatement(); + // remove the last ", " from the string + docValues = docValues.substring( 0, docValues.length() - 2 ); + + query = String.format( "db.%s.insertMany([%s])", entityName, docValues ); + log.info( "rest: " + query ); + try { + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "mql" ) ) + .query( query ).origin( "Backup Manager" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + //.statement( statement ) + .build().addTransaction( transaction ) ).get( 0 ); + } catch ( Exception e ) { + throw new GenericRuntimeException("Could not insert document backup data from query: " + query + " with error message:" + e.getMessage()); + + } + + batchCounter = 0; + docValues = ""; + query = ""; + } + + transaction.commit(); + break; + case GRAPH: + //FIXME: edges and nodes are matched via their original id, which is now reinserted into the new nodes (and edges) as an invisible '_id'. But if you create a backup from data that was already once inserted, you get duplicate key '_id' errors + //TODO: only one direction for edges implemented, direction is not checked, is always source -> target + // not batched + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Entry-Inserter" ); + String graphValues = ""; + PolyValue deserialized = null; + + while ( (inLine = in.readLine()) != null ) { + deserialized = PolyValue.fromTypedJson( inLine, PolyGraph.class ); //--> deserialized is null?? 
+ } + + int nodeCounter = 0; + // id of the node and the created label of the node + HashMap nodeMap = new HashMap<>(); + String nodesString = ""; + String edgesString = ""; + + @NotNull PolyGraph graph = deserialized.asGraph(); + @NotNull PolyMap nodes = graph.getNodes(); + @NotNull PolyMap edges = graph.getEdges(); + //List nodes = deserialized.asList().stream().filter( v -> v.isNode() ).map( v -> v.asNode() ).collect( Collectors.toList() ); + + // go through and create all nodes + for (PolyNode node : nodes.values()) { + String labels = getLabels( node.labels ); + String properties = getProperties( node.properties ); + + String nString = "n" + String.valueOf( nodeCounter ); + nodeMap.put( node.id, nString ); + nodeCounter++; + batchCounter++; + + nodesString += String.format( "(%s:%s {_id:'%s', %s}), ", nString, labels, node.id, properties ); + + // if the batch size is reached, execute the query + if (batchCounter == BackupManager.batchSize) { + // remove the last ", " from the string + nodesString = nodesString.substring( 0, nodesString.length() - 2 ); + query = String.format( "CREATE %s", nodesString ); + log.info( query ); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "cypher" ) ) + .query( query ).origin( "Backup - Insert Graph Entries" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .build() + .addTransaction( transaction ) ).get( 0 ); + + batchCounter = 0; + nodesString = ""; + query = ""; + } + + } + + // create the nodes that are left from not completed batch + if (batchCounter != 0) { + // remove the last ", " from the string + nodesString = nodesString.substring( 0, nodesString.length() - 2 ); + query = String.format( "CREATE %s", nodesString ); + log.info( query ); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "cypher" ) ) + .query( query ).origin( "Backup - Insert Graph Entries" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .build() + .addTransaction( transaction ) ).get( 0 ); + + batchCounter = 0; + nodesString = ""; + query = ""; + } + + + // go through all edges + String matchString = ""; + for ( PolyEdge edge : edges.values() ) { + String labels = getLabels( edge.labels ); + String properties = getProperties( edge.properties ); + String source = nodeMap.get( edge.source ); + String target = nodeMap.get( edge.target ); + batchCounter++; + + if ( !properties.isEmpty() ) { + properties = String.format( "'%s', %s", edge.id, properties ); + } else { + properties = String.format( "'%s'", edge.id ); + } + + matchString += String.format( "(%s {_id: '%s'}), (%s {_id: '%s'}), ", source, edge.source, target, edge.target ); + edgesString += String.format( "(%s)-[:%s {_id:%s}]->(%s), ", source, labels, properties, target ); + + if (batchCounter == BackupManager.batchSize) { + if ( !edgesString.isEmpty() ) { + // remove the last ", " from the string + edgesString = edgesString.substring( 0, edgesString.length() - 2 ); + matchString = matchString.substring( 0, matchString.length() - 2 ); + query = String.format( "MATCH %s CREATE %s", matchString, edgesString ); + + log.info( query ); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "cypher" ) ) + .query( query ).origin( "Backup - Insert Graph Entries" ) + .transactionManager( transactionManager ) + .namespaceId( 
namespaceId ) + .build() + .addTransaction( transaction ) ).get( 0 ); + + batchCounter = 0; + edgesString = ""; + matchString = ""; + labels = ""; + properties = ""; + source = ""; + target = ""; + query = ""; + } + } + + } + + // create the edges that are left from not completed batch + if (batchCounter != 0) { + if ( !edgesString.isEmpty() ) { + // remove the last ", " from the string + edgesString = edgesString.substring( 0, edgesString.length() - 2 ); + matchString = matchString.substring( 0, matchString.length() - 2 ); + query = String.format( "MATCH %s CREATE %s", matchString, edgesString ); + + log.info( query ); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "cypher" ) ) + .query( query ).origin( "Backup - Insert Graph Entries" ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .build() + .addTransaction( transaction ) ).get( 0 ); + + batchCounter = 0; + edgesString = ""; + matchString = ""; + query = ""; + } + } + + transaction.commit(); + break; + default: + throw new GenericRuntimeException( "Unknown data model" ); + } + in.close(); + log.info( "data-insertion: end of thread for " + entityName ); + + + } catch(Exception e){ + throw new GenericRuntimeException( "Error while inserting entries: " + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while inserting entries: " + e.getMessage() ); + } + } + + + /** + * Gets the labels of a node as a list of strings seperated by a comma + * @param labels a PolyList of labels + * @return String of labels, seperated by a comma + */ + String getLabels( PolyList labels ) { + String labelsString = ""; + for ( PolyString label : labels ) { + labelsString += label + ", "; + } + if ( !labelsString.isEmpty() ) { + labelsString = labelsString.substring( 0, labelsString.length() - 2 ); + } + return labelsString; + } + + + /** + * Gets the properties of a node as a string + * @param properties a PolyDictionary of properties + * @return String of properties + */ + String getProperties( PolyDictionary properties ) { + String propsString = ""; + for ( PolyString key : properties.keySet() ) { + propsString += String.format( "%s: '%s', ", key, properties.get( key ) ); + } + + if ( !propsString.isEmpty() ) { + propsString = propsString.substring( 0, propsString.length() - 2 ); + } + + return propsString; + } + + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertSchema.java b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertSchema.java new file mode 100644 index 0000000000..306b55870e --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datainserter/InsertSchema.java @@ -0,0 +1,658 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datainserter; + +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.PolyImplementation; +import org.polypheny.db.ResultIterator; +import org.polypheny.db.backup.BackupEntityWrapper; +import org.polypheny.db.backup.BackupInformationObject; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.entity.LogicalConstraint; +import org.polypheny.db.catalog.entity.logical.LogicalColumn; +import org.polypheny.db.catalog.entity.logical.LogicalEntity; +import org.polypheny.db.catalog.entity.logical.LogicalForeignKey; +import org.polypheny.db.catalog.entity.logical.LogicalNamespace; +import org.polypheny.db.catalog.entity.logical.LogicalPrimaryKey; +import org.polypheny.db.catalog.entity.logical.LogicalTable; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; +import org.polypheny.db.catalog.logistic.ConstraintType; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.catalog.logistic.EntityType; +import org.polypheny.db.languages.LanguageManager; +import org.polypheny.db.languages.QueryLanguage; +import org.polypheny.db.processing.ImplementationContext.ExecutedContext; +import org.polypheny.db.processing.QueryContext; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.type.PolyType; + +/** + * This class inserts the schema of the backup into Polypheny-DB + */ +@Slf4j +public class InsertSchema { + + public static final String BACKUP_MANAGER = "Backup Manager"; + private BackupInformationObject backupInformationObject; + private final TransactionManager transactionManager; + + + public InsertSchema( TransactionManager transactionManager ) { + this.transactionManager = transactionManager; + } + + + /** + * Manages the insertion process of the schema + * @param backupInformationObject contains all the metadata of the schema to be inserted + */ + public void start( BackupInformationObject backupInformationObject ) { + log.debug( "insert schemas" ); + this.backupInformationObject = backupInformationObject; + ImmutableMap>> tables; + + insertCreateNamespace( backupInformationObject.getWrappedNamespaces() ); + + Map>> tempMap = new HashMap<>(); + + + insertCreateTable( backupInformationObject.getWrappedTables() ); + + // alter table - add unique constraint + //TODO(FF): only call if there are any relational schemas + insertAlterTableUQ( backupInformationObject.getWrappedTables(), backupInformationObject.getConstraints() ); + insertAlterTableFK( backupInformationObject.getWrappedTables(), backupInformationObject.getForeignKeysPerTable() ); + + // create Collections + insertCreateCollection( backupInformationObject.getWrappedCollections() ); + + //TODO(FF): create something to test that only available data is tried to be inserted + + /* + insertion order (schema): + 1. Relational + 1.1 Namespaces (Create and Alter (owner)) + 1.2 Tables (Create) + 1.2.1 Columns + 1.2.2 Primary Keys + 1.3 Tables (Alter) + 1.3.1 Constraints + 1.3.2 Foreign Keys + 1.3.3 Owner (alter) + 2. Graph + 2.1 Namespaces (Create and Alter (owner)) & case sensitivity? + LogicalNamespace namespace = catalog.getSnapshot().getNamespace( graphName ).orElseThrow(); + useful?? + 3. 
Document + 3.1 Namespaces (Create and Alter (owner)) + 3.2 Collections + + After Insertion of Entries (provisional, can also be before... but then need to update materialized view, and insertion takes longer (bcs. idx): + 4. Indexes (create) + 5. Views (create) + 6. Materialized Views (create) + */ + + //1.1 Relational Namespaces + } + + + /** + * creates a "create namespace" query and executes it in polypheny for all namespaces that are marked to be inserted and are passed in the namespaces map + * @param namespaces map of namespaces to be inserted, where the key is the namespace id and the value is the wrapped namespace + */ + private void insertCreateNamespace( ImmutableMap> namespaces ) { + String query = new String(); + //TODO(FF): check if namespace already exists, give rename or overwrite option (here, or earlier?), if new name, write it to bupInformationObject + + //TODO(FF): check if namespaces is empty, throw error if it is + for ( Map.Entry> ns : namespaces.entrySet() ) { + //only insert namespaces that are marked to be inserted + if ( ns.getValue().getToBeInserted() ) { + //query = "CREATE " + ns.getValue().getEntityObject().dataModel.toString() + " NAMESPACE " + ns.getValue().getEntityObject().name + ";"; + query = String.format( "CREATE %s NAMESPACE %s", ns.getValue().getEntityObject().dataModel.toString(), ns.getValue().getEntityObject().name ); + + //TODO(FF): execute query in polypheny, alter owner, set case sensitivity (how?) + if ( !ns.getValue().getEntityObject().name.equals( "public" ) ) { + + switch ( ns.getValue().getEntityObject().dataModel ) { + case RELATIONAL: + query = String.format( "CREATE %s NAMESPACE %s", ns.getValue().getEntityObject().dataModel.toString(), ns.getValue().getEntityObject().name ); + executeStatementInPolypheny( query, ns.getKey(), ns.getValue().getEntityObject().dataModel ); + break; + + case DOCUMENT: + query = String.format( "CREATE %s NAMESPACE %s", ns.getValue().getEntityObject().dataModel.toString(), ns.getValue().getEntityObject().name ); + executeStatementInPolypheny( query, ns.getKey(), DataModel.RELATIONAL ); + break; + + case GRAPH: + query = String.format( "CREATE DATABASE %s", ns.getValue().getEntityObject().name ); + executeStatementInPolypheny( query, ns.getKey(), ns.getValue().getEntityObject().dataModel ); + break; + default: + throw new GenericRuntimeException( "During backup schema insertions not supported data model detected" + ns.getValue().getEntityObject().dataModel ); + } + } + } + } + } + + + /** + * Sets an order for a "create table" query and executes it in polypheny for all table in the tables map. 
The creation query only contains the columns and the primary key + * @param tables map of tables to be inserted, where the key is the namespace id and the value is a list of wrapped tables that are in this namespace + */ + private void insertCreateTable( ImmutableMap>> tables ) { + String query = new String(); + + // key: namespace id, value: list of tables for the namespace + for ( Map.Entry>> tablesPerNs : tables.entrySet() ) { + Long nsID = tablesPerNs.getKey(); + //String namespaceName = bupInformationObject.getBupRelNamespaces().get( nsID ).getNameForQuery(); + //only get rel namespaces from all bup namespaces + String namespaceName = backupInformationObject.getWrappedNamespaces().get( nsID ).getNameForQuery(); + + List> tablesList = tablesPerNs.getValue(); + + // go through each table in the list (of tables for one namespace) + for ( BackupEntityWrapper table : tablesList ) { + // only create tables that should be inserted + if ( table.getToBeInserted() ) { + // only create tables that don't (exist by default in polypheny) + if ( !(table.getEntityObject().entityType.equals( EntityType.SOURCE )) ) { + query = createTableQuery( table, namespaceName ); + executeStatementInPolypheny( query, nsID, DataModel.RELATIONAL ); + } + } + } + } + } + + + /** + * Creates a "alter table" query, that sets a unique constraint on a table, and executes it in polypheny for all constraints in the constraints map + * @param tables map of tables to be altered, where the key is the table id and the value is a list of wrapped tables that are in this namespace + * @param constraints map of constraints to be inserted, where the key is the table id and the value is a list of constraints that are in this table + */ + private void insertAlterTableUQ( ImmutableMap>> tables, ImmutableMap> constraints ) { + String query = new String(); + + for ( Map.Entry>> tablesPerNs : tables.entrySet() ) { + Long nsID = tablesPerNs.getKey(); + String namespaceName = backupInformationObject.getWrappedNamespaces().get( nsID ).getNameForQuery(); + + List> tablesList = tablesPerNs.getValue(); + + // go through each constraint in the list (of tables for one namespace) + for ( BackupEntityWrapper table : tablesList ) { + //TODO:FF (low priority): exclude source tables (for speed) + + // compare the table id with the constraint keys, and if they are the same, create the constraint, and check if it schoult be inserted + if ( (constraints.containsKey( table.getEntityObject().unwrap( LogicalTable.class ).get().getId() )) && table.getToBeInserted() ) { + List constraintsList = constraints.get( table.getEntityObject().unwrap( LogicalTable.class ).get().getId() ); + List logicalColumns = backupInformationObject.getColumns().get( table.getEntityObject().unwrap( LogicalTable.class ).get().getId() ); + + // go through all constraints per table + for ( LogicalConstraint constraint : constraintsList ) { + if ( constraint.type.equals( ConstraintType.UNIQUE ) ) { + String tableName = table.getNameForQuery(); + String constraintName = constraint.name; + String listOfCols = new String(); + + List colIDs = constraint.getKey().fieldIds; + + // get all column-names used in the constraint from the columns + listOfCols = getListOfCol( colIDs, logicalColumns ); + + query = String.format( "ALTER TABLE %s.%s ADD CONSTRAINT %s UNIQUE (%s)", namespaceName, tableName, constraintName, listOfCols ); + log.info( query ); + executeStatementInPolypheny( query, nsID, DataModel.RELATIONAL ); + } + } + } + } + } + } + + + /** + * Creates a "alter table" query, that sets 
a foreign key constraint on a table, and executes it in polypheny for all foreign keys in the foreignKeysPerTable map + * @param bupTables (deprecated) map of tables to be altered, where the key is the table id and the value is a list of wrapped tables that are in this namespace + * @param foreignKeysPerTable map of foreign keys to be inserted, where the key is the table id and the value is a list of foreign keys that are in this table + */ + private void insertAlterTableFK( ImmutableMap>> bupTables, ImmutableMap> foreignKeysPerTable ) { + String query = new String(); + //if (!foreignKeysPerTable.isEmpty()) { + // go through foreign key constraints and collect the necessary data + for ( Map.Entry> fkListPerTable : foreignKeysPerTable.entrySet() ) { + if ( !(fkListPerTable.getValue().isEmpty()) ) { + Long tableId = fkListPerTable.getKey(); + + for ( LogicalForeignKey foreignKey : fkListPerTable.getValue() ) { + // get the table where the foreign key is saved + Long nsId = foreignKey.namespaceId; + BackupEntityWrapper table = backupInformationObject.getWrappedTables().get( nsId ).stream().filter( e -> e.getEntityObject().unwrap( LogicalTable.class ).orElseThrow().getId() == tableId ).findFirst().orElseThrow(); + //boolean lol = table.getToBeInserted(); + // check if the table is marked to be inserted + if ( table.getToBeInserted() ) { + String namespaceName = backupInformationObject.getWrappedNamespaces().get( foreignKey.namespaceId ).getNameForQuery(); + String tableName = backupInformationObject.getWrappedTables().get( foreignKey.namespaceId ).stream().filter( e -> e.getEntityObject().unwrap( LogicalTable.class ).orElseThrow().getId() == foreignKey.entityId ).findFirst().orElseThrow().getNameForQuery(); + String constraintName = foreignKey.name; + String listOfCols = getListOfCol( foreignKey.fieldIds, backupInformationObject.getColumns().get( foreignKey.entityId ) ); + String referencedNamespaceName = backupInformationObject.getWrappedNamespaces().get( foreignKey.referencedKeyNamespaceId ).getNameForQuery(); + String referencedTableName = backupInformationObject.getWrappedTables().get( foreignKey.referencedKeyNamespaceId ).stream().filter( e -> e.getEntityObject().unwrap( LogicalTable.class ).orElseThrow().getId() == foreignKey.referencedKeyEntityId ).findFirst().orElseThrow().getNameForQuery(); + String referencedListOfCols = getListOfCol( foreignKey.referencedKeyFieldIds, backupInformationObject.getColumns().get( foreignKey.referencedKeyEntityId ) ); + String updateAction = foreignKey.updateRule.name(); + String deleteAction = foreignKey.deleteRule.name(); + //enforcementTime (on commit) - right now is manually set to the same thing everywhere (in the rest of polypheny) + + query = String.format( "ALTER TABLE %s.%s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s.%s (%s) ON UPDATE %s ON DELETE %s", namespaceName, tableName, constraintName, listOfCols, referencedNamespaceName, referencedTableName, referencedListOfCols, updateAction, deleteAction ); + log.info( query ); + executeStatementInPolypheny( query, nsId, DataModel.RELATIONAL ); + } + } + } + + } + //} + + } + + + /** + * Creates a "create collecton" query and executes it in polypheny for all collections in the wrappedCollections map + * @param wrappedCollections map of collections to be inserted, where the key is the namespace id and the value is a list of wrapped collections that are in this namespace + */ + private void insertCreateCollection( ImmutableMap>> wrappedCollections ) { + String query = new String(); + + //FIXME(FF): 
collections are not wrapped yet!! + // go through all collections per namespace and create and execute a query + for ( Map.Entry>> collectionsPerNs : wrappedCollections.entrySet() ) { + Long nsIDOriginal = collectionsPerNs.getKey(); + String namespaceName = backupInformationObject.getWrappedNamespaces().get( nsIDOriginal ).getNameForQuery(); + long nsId = Catalog.snapshot().getNamespace( namespaceName ).orElseThrow().id; + + List> collectionsList = collectionsPerNs.getValue(); + + // go through each collection in the list (of collections for one namespace) + for ( BackupEntityWrapper collection : collectionsList ) { + // only create collections that should be inserted + if ( collection.getToBeInserted() ) { + // only create tables that don't (exist by default in polypheny) + query = String.format( "db.createCollection(\"%s\")", collection.getNameForQuery() ); + log.info( query ); + executeStatementInPolypheny( query, nsId, DataModel.DOCUMENT ); + } + } + } + //db.createCollection('users') + //executeStatementInPolypheny( "db.createCollection(\"users\")", Catalog.defaultNamespaceId, DataModel.DOCUMENT ); + } + + + /** + * Gets a list of the column names (seperated by ", ") without brackets + * + * @param colIDs list of column ids from which the name is wanted + * @param logicalColumns list of all logical columns + * @return list, seperated by a semicolon, with the names of the wanted columns (from the ids) + */ + private String getListOfCol( List colIDs, List logicalColumns ) { + String listOfCols = new String(); + + for ( Long colID : colIDs ) { + String colName = logicalColumns.stream().filter( e -> e.getId() == colID ).findFirst().get().getName(); + listOfCols = listOfCols + colName + ", "; + + } + if ( listOfCols.length() > 0 ) { + listOfCols = listOfCols.substring( 0, listOfCols.length() - 2 ); // remove last ", " + } + + return listOfCols; + } + + + /** + * Creates a "create table" query for one table and returns it, the query only contains the columns and the primary key + * @param table wrapped table to be inserted + * @param namespaceName name of the namespace of the table + * @return the query to create the table + */ + private String createTableQuery( BackupEntityWrapper table, String namespaceName ) { + String query = new String(); + String columnDefinitions = ""; + String pkConstraint = ""; + ImmutableMap> columns = backupInformationObject.getColumns(); + ImmutableMap> primaryKeys = backupInformationObject.getPrimaryKeysPerTable(); + LogicalTable logicalTable = table.getEntityObject().unwrap( LogicalTable.class ).orElseThrow(); + Long tableID = logicalTable.getId(); + List colsPerTable = columns.get( tableID ); + List pksPerTable = primaryKeys.get( tableID ); + + // create the column defintion statement for the table + for ( LogicalColumn col : colsPerTable ) { + columnDefinitions = columnDefinitions + createColumnDefinition( col ); + //log.info( columnDefinitions ); + } + if ( !columnDefinitions.isEmpty() ) { + columnDefinitions = columnDefinitions.substring( 0, columnDefinitions.length() - 2 ); // remove last ", " + } + + // create the primary key constraint statement for the table + if ( !(pksPerTable.isEmpty()) ) { + String listOfCols = new String(); + for ( long columnId : pksPerTable.get( 0 ).fieldIds ) { + String colName = colsPerTable.stream().filter( e -> e.getId() == columnId ).findFirst().get().getName(); + listOfCols = listOfCols + colName + ", "; + } + if ( !listOfCols.isEmpty() ) { + listOfCols = listOfCols.substring( 0, listOfCols.length() - 2 ); // remove last 
", " + pkConstraint = ", PRIMARY KEY (" + listOfCols + ")"; + } + } + + //query to create one table (from the list of tables, from the list of namespaces) + //TODO(FF): ON STORE storename PARTITION BY partionionInfo + //query = String.format( "CREATE TABLE %s.%s (%s, %s)", namespaceName, table.getNameForQuery(), columnDefinitions, pkConstraint ); + query = String.format( "CREATE TABLE %s.%s (%s%s)", namespaceName, table.getNameForQuery(), columnDefinitions, pkConstraint ); + log.info( query ); + + return query; + } + + + /** + * Creates the column definition part of a "create table" query + * @param col logical column for which the column definition should be created + * @return the column definition part of a "create table" query + */ + private String createColumnDefinition( LogicalColumn col ) { + String columnDefinitionString = new String(); + String colName = col.getName(); + String colDataType = col.getType().toString(); + String colNullable = ""; + String defaultValue = new String(); + if ( !(col.defaultValue == null) ) { + + //replace ' to '', in case there is a " in the default value + String value = col.defaultValue.value.toJson(); + value = value.replaceAll( "'", "''" ); + + if ( PolyType.CHAR_TYPES.contains( col.defaultValue.type ) || PolyType.DATETIME_TYPES.contains( col.defaultValue.type ) ) { + //defaultValue = String.format( " DEFAULT '%s'", regexString ); + defaultValue = String.format( " DEFAULT '%s'", value ); + //String test = " DEFAULT '" + regexString + "'"; + String test = " DEFAULT '" + value + "'"; + } else { + defaultValue = String.format( " DEFAULT %s", value ); + } + //defaultValue = String.format( " DEFAULT %s", col.defaultValue.value ); + //log.info( "default for " + colDataType + ": " + value2 + " || " + value3); + //log.info( "default for " + colDataType + ": " + value); + } + + String caseSensitivity = new String(); + if ( !(col.collation == null) ) { + // Remove the "_" from the collation enum standard + String collation = col.collation.toString().replaceAll( "_", " " ); + caseSensitivity = String.format( "COLLATE %s", collation); + } + + if ( col.nullable ) { + colNullable = ""; + } else if ( !col.nullable ) { + colNullable = " NOT NULL "; + } else { + throw new GenericRuntimeException( "During backup schema insertions not supported nullable value detected" + colNullable ); + } + + String dataTypeString = new String(); + switch ( colDataType ) { + case "BIGINT": + case "BOOLEAN": + case "DOUBLE": + case "INTEGER": + case "REAL": + case "SMALLINT": + case "TINYINT": + case "DATE": + case "AUDIO": + case "FILE": + case "IMAGE": + case "VIDEO": + dataTypeString = colDataType; + break; + + case "TIME": + case "TIMESTAMP": + if ( !(col.length == null) ) { + dataTypeString = String.format( "%s(%s) ", colDataType, col.length.toString() ); + } else { + dataTypeString = colDataType; + } + break; + + case "VARCHAR": + dataTypeString = String.format( "%s(%s) ", colDataType, col.length.toString() ); + break; + + case "DECIMAL": + if ( !(col.length == null) ) { + if ( !(col.scale == null) ) { + dataTypeString = String.format( "%s(%s, %s) ", colDataType, col.length.toString(), col.scale.toString() ); + } else { + dataTypeString = String.format( "%s(%s) ", colDataType, col.length.toString() ); + } + } else { + dataTypeString = colDataType; + } + break; + + default: + throw new GenericRuntimeException( "During backup schema insertions not supported datatype detected" + colDataType ); + } + + String arrayString = new String(); + + if ( !(col.collectionsType == null) ) { + 
String collectionsType = col.collectionsType.toString(); + + switch ( collectionsType ) { + case "ARRAY": + arrayString = String.format( " ARRAY (%s, %s) ", col.dimension.toString(), col.cardinality.toString() ); + break; + + default: + throw new GenericRuntimeException( "During backup schema insertions not supported collectionstype detected" + collectionsType ); + } + } + + columnDefinitionString = String.format( "%s %s%s%s%s %s, ", colName, dataTypeString, arrayString, colNullable, defaultValue, caseSensitivity ); + //log.info( columnDefinitionString ); + + return columnDefinitionString; + } + + + //(implemented at other place, but may change there. This is kept for reference) + /* + private String collationToString( Collation collation ) { + try { + if (collation.equals( Collation.CASE_SENSITIVE )) { + return "CASE SENSITIVE"; + } + if ( collation.equals( Collation.CASE_INSENSITIVE ) ) { + return "CASE INSENSITIVE"; + } + else { + throw new RuntimeException( "Collation not supported" ); + } + } catch ( Exception e ) { + throw new RuntimeException( e ); + } + } + */ + + + //(implemented at other place, but may change there. This is kept for reference) + /* + private String nullableBoolToString (boolean nullable) { + if (nullable) { + return "NULL"; + } else { + return "NOT NULL"; + } + } + */ + + + /** + * Executes a query in polypheny + * @param query query to be executed + * @param namespaceId namespace id of where the query should be executed + * @param dataModel data model of the query + */ + private void executeStatementInPolypheny( String query, Long namespaceId, DataModel dataModel ) { + log.info( "entered execution with query:" + query ); + Transaction transaction; + Statement statement = null; + PolyImplementation result; + + //TODO: use anyquery, rest not necessary + + switch ( dataModel ) { + case RELATIONAL: + try { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Inserter" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE().anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "sql" ) ) + .query( query ) + .origin( BACKUP_MANAGER ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + transaction.commit(); + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction: " + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while starting transaction: " + e.getMessage() ); + } + break; + + case DOCUMENT: + try { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Inserter" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "mql" ) ) + .query( query ) + .origin( BACKUP_MANAGER ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + transaction.commit(); + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction" + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while starting transaction" + e.getMessage() ); + } + break; + + case 
GRAPH: + try { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Inserter" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder().language( QueryLanguage.from( "cypher" ) ) + .query( query ) + .origin( BACKUP_MANAGER ) + .transactionManager( transactionManager ) + .namespaceId( namespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + transaction.commit(); + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction" + e.getMessage() ); + } catch ( TransactionException e ) { + throw new GenericRuntimeException( "Error while starting transaction" + e.getMessage() ); + } + break; + + default: + throw new RuntimeException( "Backup - InsertSchema: DataModel not supported" ); + } + + //just to keep it + int i = 1; + if ( i == 0 ) { + try { + // get a transaction and a statement + transaction = transactionManager.startTransaction( Catalog.defaultUserId, false, "Backup Inserter" ); + statement = transaction.createStatement(); + ExecutedContext executedQuery = LanguageManager.getINSTANCE() + .anyQuery( + QueryContext.builder() + .language( QueryLanguage.from( "sql" ) ) + .query( query ) + .origin( BACKUP_MANAGER ) + .transactionManager( transactionManager ) + .namespaceId( Catalog.defaultNamespaceId ) + .statement( statement ) + .build() + .addTransaction( transaction ) ).get( 0 ); + // in case of results + ResultIterator iter = executedQuery.getIterator(); + while ( iter.hasMoreRows() ) { + // liste mit tuples + iter.getNextBatch(); + } + + } catch ( Exception e ) { + throw new GenericRuntimeException( "Error while starting transaction" + e.getMessage() ); + } + } + + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileReader.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileReader.java new file mode 100644 index 0000000000..044cffe338 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileReader.java @@ -0,0 +1,74 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datasaver; + +import java.io.BufferedInputStream; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.InputStreamReader; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; + +/** + * Reader that streams data from a file + */ +public class BackupFileReader { + + File file; + BufferedReader in; + + + /** + * Creates a new BackupFileReader, that reads from a file + * @param file file to read from + */ + public BackupFileReader( File file ) { + this.file = file; + try { + this.in = new BufferedReader( new InputStreamReader( new BufferedInputStream( new FileInputStream( file ), 32768 ) ) ); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Couldn't open file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Reads a line from the file + * @return returns the read line as a String + */ + public String readLine() { + try { + return in.readLine(); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Couldn't read from file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Closes the reader + */ + public void close() { + try { + in.close(); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Couldn't close file " + file.getName() + " " + e.getMessage() ); + } + } + + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileWriter.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileWriter.java new file mode 100644 index 0000000000..13f37969da --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/BackupFileWriter.java @@ -0,0 +1,114 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.datasaver; + +import java.io.BufferedOutputStream; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; + +/** + * Writer that writes data to a file + */ +public class BackupFileWriter { + File file; + BufferedWriter out; + + + /** + * Creates a new BackupFileWriter, that writes to a file + * @param file file to write to + */ + public BackupFileWriter( File file) { + + this.file = file; + try { + this.out = new BufferedWriter( new OutputStreamWriter( new BufferedOutputStream( new FileOutputStream( file ), 32768 ) ) ); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Couldn't open file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Writes a line to the file (creates a new line after the line) + * @param string String to write + */ + public void writeLine( String string ) { + try { + out.write( string ); + out.newLine(); + } catch ( Exception e ) { + throw new GenericRuntimeException( "Couldn't write to file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Writes to the file (doesn't create a new line after the line) + * @param string String to write + */ + public void write ( String string ) { + try { + out.write( string ); + } catch ( IOException e ) { + throw new GenericRuntimeException( "Couldn't write to file " + file.getName() + " " + e.getMessage() ); + } + + } + + + /** + * Creates a new line in the file + */ + public void newLine () { + try { + out.newLine(); + } catch ( IOException e ) { + throw new GenericRuntimeException( "Couldn't write to file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Flushes the writer + */ + public void flush () { + try { + out.flush(); + } catch ( IOException e ) { + throw new GenericRuntimeException( "Couldn't flush file " + file.getName() + " " + e.getMessage() ); + } + } + + + /** + * Closes the writer + */ + public void close () { + try { + out.close(); + } catch ( IOException e ) { + throw new GenericRuntimeException( "Couldn't close file " + file.getName() + " " + e.getMessage() ); + } + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifest.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifest.java new file mode 100644 index 0000000000..0911217647 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datasaver.manifest; + + +import java.util.Date; +import java.util.List; +import lombok.Getter; +import lombok.Setter; + +/** + * This class represents the manifest file that is created during a backup. 
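+ * <p>For illustration, a manifest serialized with Gson by the BackupManifestGenerator might look roughly
+ * like this; the entity, paths and values are hypothetical:
+ * <pre>{@code
+ * { "entityInfos": [ { "filePaths": [ "backup/data/public_album.txt" ], "entityName": "album",
+ *       "namespaceName": "public", "namespaceId": 0, "dataModel": "RELATIONAL", "nbrCols": 3 } ],
+ *   "overallChecksum": "", "backupDate": "Jan 1, 2024 12:00:00 AM" }
+ * }</pre>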
+ */ +@Getter @Setter +public class BackupManifest { + private List entityInfos; + private String overallChecksum; + private Date backupDate; + + + /** + * The manifest contains information about the saved data from the backup + * @param entityInfos list of entityInfos - Information about each saved entity in the backup + * @param overallChecksum checksum of the whole backup (not implemented yet) + * @param backupDate date when the backup was created + */ + public BackupManifest( List entityInfos, String overallChecksum, Date backupDate ) { + this.entityInfos = entityInfos; + this.overallChecksum = overallChecksum; + this.backupDate = backupDate; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifestGenerator.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifestGenerator.java new file mode 100644 index 0000000000..f453438f46 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/BackupManifestGenerator.java @@ -0,0 +1,55 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datasaver.manifest; + +import com.google.gson.Gson; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Date; +import java.util.List; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; + +/** + * This class generates the manifest file that is created during a backup. 
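+ * <p>Minimal usage sketch; all names and values below are hypothetical and not taken from this patch:
+ * <pre>{@code
+ * List<EntityInfo> infos = new ArrayList<>();
+ * infos.add( new EntityInfo( List.of( "backup/data/public_album.txt" ), "album", "public", 0L, DataModel.RELATIONAL, 3 ) );
+ * BackupManifestGenerator.generateManifest( infos, "", new File( "backup/manifest.txt" ), new Date() );
+ * }</pre>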
+ */ +public class BackupManifestGenerator { + + + /** + * The manifest contains information about the saved data from the backup + * @param entityInfoList list of entityInfos - Information about each saved entity in the backup + * @param overallChecksum checksum of the whole backup (not implemented yet) + * @param manifestPath path where the manifest file should be saved + * @param backupDate date when the backup was created + */ + public static void generateManifest( List entityInfoList, String overallChecksum, File manifestPath, Date backupDate ) { + BackupManifest backupManifest = new BackupManifest(entityInfoList, overallChecksum, backupDate); + Gson gson = new Gson(); + String json = gson.toJson(backupManifest); + + // TODO(FF): change how to write to file (use polyphenywriter thing) + try (FileWriter writer = new FileWriter(manifestPath);) + { + writer.write(json); + writer.flush(); + } catch ( IOException e) { + throw new GenericRuntimeException("Error while writing manifest file" + e.getMessage()); + } + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/EntityInfo.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/EntityInfo.java new file mode 100644 index 0000000000..6b9938edaf --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/EntityInfo.java @@ -0,0 +1,74 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datasaver.manifest; + +import java.nio.file.Path; +import java.util.List; +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.catalog.logistic.DataModel; + +/** + * This class saves information to be saved in the manifest, about an entity that is saved in a backup. + */ +@Getter @Setter +public class EntityInfo { + private List filePaths; + private String entityName; + private String namespaceName; + private Long namespaceId; + private DataModel dataModel; + private int nbrCols; + private String checksum; + + + /** + * The entityInfo contains information (that should be saved in the manifest) about an entity that is saved in a backup. + * @param filePaths List of paths (from backup/data/...) to the file where the entry-data is saved. If the entity is saved in multiple files, the list contains multiple paths. + * @param entityName Name of the entity with which it should be inserted + * @param namespaceName Name of the namespace of the entity + * @param namespaceId Id of the namespace of the entity + * @param dataModel DataModel of the entity + */ + public EntityInfo( List filePaths, String entityName, String namespaceName, Long namespaceId, DataModel dataModel ) { + this.filePaths = filePaths; + this.entityName = entityName; + this.namespaceName = namespaceName; + this.namespaceId = namespaceId; + this.dataModel = dataModel; + } + + + /** + The entityInfo contains information (that should be saved in the manifest) about an entity that is saved in a backup. 
+ * @param filePaths List of paths (from backup/data/...) to the file where the entry-data is saved. If the entity is saved in multiple files, the list contains multiple paths. + * @param entityName Name of the entity with which it should be inserted + * @param namespaceName Name of the namespace of the entity + * @param namespaceId Id of the namespace of the entity + * @param dataModel DataModel of the entity + * @param nbrCols Number of columns of the entity (for tables) + */ + public EntityInfo( List filePaths, String entityName, String namespaceName, Long namespaceId, DataModel dataModel, int nbrCols ) { + this.filePaths = filePaths; + this.entityName = entityName; + this.namespaceName = namespaceName; + this.namespaceId = namespaceId; + this.dataModel = dataModel; + this.nbrCols = nbrCols; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/ManifestReader.java b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/ManifestReader.java new file mode 100644 index 0000000000..f9a59329ea --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/datasaver/manifest/ManifestReader.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.datasaver.manifest; + +import com.google.gson.Gson; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.io.Reader; +import org.polypheny.db.catalog.exceptions.GenericRuntimeException; + +/** + * Transforms the manifest file from the backup into a BackupManifest object + */ +public class ManifestReader { + + /** + * Reads a manifest file and returns the BackupManifest object + * @param manifestFilePath path to the manifest file + * @return BackupManifest object + */ + public BackupManifest readManifest( String manifestFilePath ) { + try ( + Reader reader = new FileReader( manifestFilePath ); + ) + { + Gson gson = new Gson(); + return gson.fromJson( reader, BackupManifest.class ); + + } catch ( FileNotFoundException e ) { + throw new GenericRuntimeException( "Manifest was not found" + e.getMessage()); + } catch ( IOException e ) { + throw new GenericRuntimeException( "Couldn't read manifest" + e.getMessage() ); + } + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/dependencies/BackupEntityType.java b/dbms/src/main/java/org/polypheny/db/backup/dependencies/BackupEntityType.java new file mode 100644 index 0000000000..1187331a3b --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/dependencies/BackupEntityType.java @@ -0,0 +1,34 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.dependencies; + +/** + * Enum that represents the different types of entities that can be backed up. (Used for dependencies/Referencers) + */ +public enum BackupEntityType { + NAMESPACE( 1 ), + TABLE( 2 ); + //SOURCE( 3 ), + //VIEW( 4 ), + //MATERIALIZED_VIEW( 5 ); + + + private final int id; + + + BackupEntityType( int id ) { this.id = id; } +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyAssembler.java b/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyAssembler.java new file mode 100644 index 0000000000..22149f78ea --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyAssembler.java @@ -0,0 +1,62 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.dependencies; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Class that assembles the dependencies of a given entity. + */ +public class DependencyAssembler { + + /** + * Returns a list of all entities that depend on the given entity. + * @param visited List of already visited entities (already checked the follow up dependencies, to manage cycles) + * @param currentEntity The entity to check the dependencies for + * @param allTableReferencers List of all entityReferencers for tables that are in the backupInformationObject + * @return List of all entities (resp. 
their id's) that depend on the given entity + */ + public List getDependencies( List visited, EntityReferencer currentEntity, List allTableReferencers ) { + if ( (visited.contains( currentEntity.getEntityId() )) || (currentEntity.getReferencerTables().isEmpty() && currentEntity.getReferencerNamespaces().isEmpty()) ) { + return visited; + } else { + visited.add( currentEntity.getEntityId() ); + + + // List referencerTables = allTableReferencers.stream().filter( entityReferencer -> entityReferencer.getEntityId().equals( currentEntity.getEntityId() ) ).toList(); + /* + List referencerTables = new ArrayList<>(); + for ( EntityReferencer entityReferencer : allTableReferencers ) { + if ( currentEntity.getReferencerTables().contains( entityReferencer.getEntityId() ) ) { + referencerTables.add( entityReferencer ); + } + } + */ + + List referencerTables = allTableReferencers.stream().filter( e -> e.getEntityId().equals( currentEntity.getEntityId() )).collect( Collectors.toList()); + for ( EntityReferencer nextEntity : referencerTables ) { + visited = getDependencies( visited, nextEntity, allTableReferencers ); + } + + return visited; + } + + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyManager.java b/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyManager.java new file mode 100644 index 0000000000..b135bd81c4 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/dependencies/DependencyManager.java @@ -0,0 +1,77 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.dependencies; + +import java.util.ArrayList; +import java.util.List; +import org.polypheny.db.backup.BackupInformationObject; +import org.polypheny.db.util.Pair; + +/** + * Initiates and handles the search for all "follow up" dependencies of a given entityReferencer (only works for tables at them moment) + */ +public class DependencyManager { + + + /** + * Returns a list of all entities referenced by the entityReferencer, and all entities referenced by those entities, and so on (calls recursive function getDependencies) + * @param entityReferencer the entityReferencer for which to find all entities that reference it + * @param allTableReferencers a list of all entityReferencers in the backupInformationObject + * @return a list of all entity id's that reference the entityReferencer, and their follow up dependencies + */ + public List getReferencedEntities( EntityReferencer entityReferencer, List allTableReferencers ) { + DependencyAssembler dependencyAssembler = new DependencyAssembler(); + List visited = new ArrayList<>(); + //fixme: check if entityReferencer is empty/null + + if ( entityReferencer.getEntityType().equals( BackupEntityType.NAMESPACE ) ) { + //TODO(FF): implement - same as the rest, but remove (or handle differently) first element (first element should be the namespace itself) + // go through all tables referenced by namespace, do recursive function for each table, but manually check here if it is already visited for the outermost layer + + + //collect all tables from referencerNamespaceTablePairs where the key is the id of the entityReferencer + List referencerTables = new ArrayList<>(); + for ( EntityReferencer namespaceReferencer : allTableReferencers ) { + for ( Pair pair : namespaceReferencer.getReferencerNamespaceTablePairs() ) { + if ( pair.left.equals( entityReferencer.getEntityId() ) ) { + Long tableId = pair.right; + //find the entityReferencer for the tableId in allTableReferencers + for ( EntityReferencer tableReferencer : allTableReferencers ) { + if ( tableReferencer.getEntityId().equals( tableId ) ) { + referencerTables.add( tableReferencer ); + } + } + } + } + } + + for ( EntityReferencer nextEntity : referencerTables ) { + visited = dependencyAssembler.getDependencies( visited, nextEntity, allTableReferencers ); + } + return entityReferencer.getReferencerNamespaces(); + + + } else if ( entityReferencer.getEntityType().equals( BackupEntityType.TABLE ) ) { + return dependencyAssembler.getDependencies( visited, entityReferencer, allTableReferencers ); + } else { + throw new RuntimeException( "Unknown entity type" ); + } + } + + + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/dependencies/EntityReferencer.java b/dbms/src/main/java/org/polypheny/db/backup/dependencies/EntityReferencer.java new file mode 100644 index 0000000000..eb3f56b6cb --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/dependencies/EntityReferencer.java @@ -0,0 +1,66 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.dependencies; + +import java.util.List; +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.util.Pair; + +/** + * Class that contains entities and by what they are referenced + */ +public class EntityReferencer { + @Getter + private Long entityId; + @Getter + private BackupEntityType entityType; + @Getter @Setter + private List referencerNamespaces; //TODO(FF): is self included? + @Getter @Setter + private List referencerTables; + @Getter @Setter + private List> referencerNamespaceTablePairs; + + + /** + * Creates an entityReferencer, which contains the id and type of the entity and the ids of the entities that reference it ("namespace", "table" are the only implemented ones at the moment) + * @param entityId The id of the entity that is referenced by another entity + * @param entityType The type of the entity + */ + public EntityReferencer(Long entityId, BackupEntityType entityType) { + this.entityId = entityId; + this.entityType = entityType; + } + + + /** + * Checks if the entity is referenced by another entity + * @param entityId The id of the entity you want to check whether it is referenced by another entity + * @param entityType The type of the entity to be checked + * @return true if the entity is referenced by another entity, false if not + */ + public Boolean isReferenced(Long entityId, BackupEntityType entityType) { + if (referencerNamespaces.isEmpty() && referencerTables.isEmpty()) { + return false; + } + else { + return true; + } + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/webui/BackupCrud.java b/dbms/src/main/java/org/polypheny/db/backup/webui/BackupCrud.java new file mode 100644 index 0000000000..c4d7797c9e --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/webui/BackupCrud.java @@ -0,0 +1,98 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
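To make the traversal concrete, the sketch below wires two table referencers together by hand and runs the dependency search; the ids and referencer lists are invented (in Polypheny they are filled from the BackupInformationObject during data gathering), so the printed result depends entirely on that wiring, and the snippet is not part of the patch.

import java.util.List;

class DependencyTraversalExample {

    static void traverse() {
        EntityReferencer t1 = new EntityReferencer( 1L, BackupEntityType.TABLE );
        t1.setReferencerTables( List.of( 2L ) );   // t1 is referenced by t2
        t1.setReferencerNamespaces( List.of() );

        EntityReferencer t2 = new EntityReferencer( 2L, BackupEntityType.TABLE );
        t2.setReferencerTables( List.of( 1L ) );   // t2 is referenced by t1, forming a cycle
        t2.setReferencerNamespaces( List.of() );

        // The visited list inside DependencyAssembler.getDependencies() keeps this cycle finite
        List<Long> visited = new DependencyManager().getReferencedEntities( t1, List.of( t1, t2 ) );
        System.out.println( visited );
    }

}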
+ */ + +package org.polypheny.db.backup.webui; + +import io.javalin.http.Context; +import java.time.Instant; +import java.util.List; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.backup.webui.models.ElementModel; +import org.polypheny.db.backup.webui.models.ManifestModel; +import org.polypheny.db.backup.webui.models.StatusModel; +import org.polypheny.db.backup.webui.models.StatusModel.Code; +import org.polypheny.db.type.entity.temporal.PolyTimestamp; +import org.polypheny.db.webui.ConfigService.HandlerType; +import org.polypheny.db.webui.HttpServer; + +public class BackupCrud { + + private final BackupManager backupManager; + + + public BackupCrud( BackupManager manager ) { + this.backupManager = manager; + + registerBackupRoutes(); + } + + + private void registerBackupRoutes() { + HttpServer server = HttpServer.getInstance(); + final String PATH = "/backup/v1"; + + server.addSerializedRoute( PATH + "/createBackup", this::createBackup, HandlerType.POST ); + + server.addSerializedRoute( PATH + "/getCurrentStructure", this::getCurrentStructure, HandlerType.GET ); + + server.addSerializedRoute( PATH + "/restoreBackup", this::restoreBackup, HandlerType.POST ); + + server.addSerializedRoute( PATH + "/deleteBackup", this::deleteBackup, HandlerType.POST ); + + server.addSerializedRoute( PATH + "/getBackups", this::getBackups, HandlerType.GET ); + } + + + private void getBackups( Context context ) { + //context.json( backupManager.getBackups().stream().map(b -> ManifestModel.from(Backup) ).collect( Collectors::toList)); todo ff enable after implementing mupltiple backups in BackupManager + + context.json( List.of( + ManifestModel.getDummy(), + new ManifestModel( 1, List.of(), new PolyTimestamp( Instant.now().toEpochMilli() ) ) ) ); + } + + + private void deleteBackup( Context context ) { + Long backupId = context.bodyAsClass( Long.class ); + + // backupManager.deleteBackup( backupId ); todo ff enable after implementing in BackupManager + context.json( new StatusModel( Code.SUCCESS, "Backup deleted" ) ); + } + + + private void restoreBackup( Context context ) { + ManifestModel manifestModel = context.bodyAsClass( ManifestModel.class ); + + // backupManager.restoreBackup( backupId ); todo ff enable after implementing in BackupManager + context.json( new StatusModel( Code.SUCCESS, "Backup restored" ) ); + } + + + private void createBackup( Context context ) { + List elements = context.bodyAsClass( List.class ); + + // backupManager.createBackup( elements ); todo ff enable after implementing in BackupManager + context.json( new StatusModel( Code.SUCCESS, "Backup created" ) ); + } + + + public void getCurrentStructure( Context context ) { + // context.json( ElementModel.fromBackupObject( backupManager.getBackupInformationObject() ) ); todo ff enable after implementing in ElementModel + context.json( ManifestModel.getDummy().elements() ); + } + + +} diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/package-info.java b/dbms/src/main/java/org/polypheny/db/backup/webui/models/BackupType.java similarity index 60% rename from core/src/main/java/org/polypheny/db/algebra/enumerable/package-info.java rename to dbms/src/main/java/org/polypheny/db/backup/webui/models/BackupType.java index aed8fdf9b0..34ac8caabc 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/package-info.java +++ b/dbms/src/main/java/org/polypheny/db/backup/webui/models/BackupType.java @@ -1,6 +1,5 @@ - /* - * Copyright 2019-2024 The Polypheny Project + * Copyright 2019-2023 The 
Polypheny Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,9 +14,21 @@ * limitations under the License. */ -/** - * Query optimizer rules for Java calling convention. - */ +package org.polypheny.db.backup.webui.models; + +import java.util.List; + + +public enum BackupType { + NONE, + SCHEMA, + DATA( SCHEMA ), + STORAGE_CONFIGURATION( SCHEMA, DATA ); + + public final List dependsOn; -package org.polypheny.db.algebra.enumerable; + BackupType( BackupType... dependsOn ) { + this.dependsOn = List.of( dependsOn ); + } +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/webui/models/ElementModel.java b/dbms/src/main/java/org/polypheny/db/backup/webui/models/ElementModel.java new file mode 100644 index 0000000000..414cbc9e41 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/webui/models/ElementModel.java @@ -0,0 +1,289 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.webui.models; + + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JacksonException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import lombok.Value; +import org.polypheny.db.backup.BackupEntityWrapper; +import org.polypheny.db.backup.BackupInformationObject; +import org.polypheny.db.catalog.entity.logical.LogicalEntity; +import org.polypheny.db.catalog.logistic.DataModel; +import org.polypheny.db.util.Pair; + +@Value +public class ElementModel { + + @JsonProperty + public String initialName; + + @JsonProperty + public String adjustedName; + + @JsonProperty + public TypeName type; + + @JsonProperty + public DataModel model; + + @JsonProperty + public List children; + + @JsonProperty + @JsonDeserialize(using = JsonForceNullDeserializer.class) + public List> dependencies; // other element name / reason for dependency + + @JsonProperty + public BackupType backupType; + + @JsonProperty + public String additionalInformation; + + + @JsonCreator + public ElementModel( + @JsonProperty("initialName") @Nonnull String initialName, + @JsonProperty("adjustedName") @Nullable String adjustedName, + @JsonProperty("type") @Nonnull TypeName type, + @JsonProperty("model") @Nullable DataModel model, + @JsonProperty("children") @Nonnull List children, + @JsonProperty("dependencies") @Nonnull List> dependencies, + @JsonProperty("backupType") @Nullable BackupType backupType, + @JsonProperty("additionalInformation") @Nullable String additionalInformation ) { + this.initialName 
= initialName; + this.adjustedName = adjustedName; + this.type = type; + this.model = model; + this.children = children; + this.dependencies = dependencies; + this.backupType = backupType; + this.additionalInformation = additionalInformation; + } + + + public enum TypeName { + NAMESPACE, + TABLE, + COLLECTION, + GRAPH, + COLUMN, + VIEW, + MATERIALIZED_VIEW, + FIELD + } + + + // todo ff: write converter for all webui object types + public static List fromBackupObject( BackupInformationObject backupObject ) { + List elements = new ArrayList<>(); + backupObject.getWrappedNamespaces().values().stream().map( namespaces -> + new ElementModel( + namespaces.getEntityObject().name, + null, + TypeName.NAMESPACE, + DataModel.RELATIONAL, + new ArrayList<>( + backupObject.getWrappedTables() + .values() + .stream() + .flatMap( + tables -> tables.stream() + .filter( t -> t.getEntityObject().namespaceId == namespaces.getEntityObject().id ) + .map( table -> fromBackupTable( table, backupObject ) ) ) + .collect( Collectors.toList() ) + ), + new ArrayList<>(), + BackupType.NONE, + null ) ) + .forEach( elements::add ); + // todo ff: write converter for all webui object types + return elements; + } + + + // todo ff add dependencies logic + private static ElementModel fromBackupTable( BackupEntityWrapper table, BackupInformationObject backupObject ) { + return new ElementModel( + table.getEntityObject().name, + null, + TypeName.TABLE, + DataModel.RELATIONAL, + backupObject.getColumns().values().stream().flatMap( columns -> columns.stream().filter( column -> column.tableId == table.getEntityObject().id ) ) + .map( column -> + new ElementModel( + column.name, + null, + TypeName.COLUMN, + DataModel.RELATIONAL, + new ArrayList<>(), + new ArrayList<>(), // todo ff add dependencies logic + BackupType.NONE, + null ) ).collect( Collectors.toList() ), + new ArrayList<>(), + BackupType.NONE, + null ); + } + + + // example method, delete later + public static List getDummyRels() { + return List.of( + new ElementModel( + "testNamespace", + null, + TypeName.NAMESPACE, + DataModel.RELATIONAL, + List.of( new ElementModel( + "testTable2", + null, + TypeName.TABLE, + DataModel.RELATIONAL, + List.of( + new ElementModel( + "testColumn", + null, + TypeName.COLUMN, + DataModel.RELATIONAL, + List.of(), + List.of(), + BackupType.NONE, + null + ), + new ElementModel( + "testColumn2", + null, + TypeName.COLUMN, + DataModel.RELATIONAL, + List.of(), + List.of(), + BackupType.NONE, + null + ) + ), + List.of(), + BackupType.NONE, + "This is some additional information." + ) ), + List.of(), + BackupType.NONE, + null ), + new ElementModel( + "testNamespace2", + null, + TypeName.NAMESPACE, + DataModel.RELATIONAL, + List.of( new ElementModel( + "testTable2", + null, + TypeName.TABLE, + DataModel.RELATIONAL, + List.of( + new ElementModel( + "testColumn", + null, + TypeName.COLUMN, + DataModel.RELATIONAL, + List.of(), + List.of(), + BackupType.NONE, + null + ), + new ElementModel( + "testColumn2", + null, + TypeName.COLUMN, + DataModel.RELATIONAL, + List.of(), + List.of( Pair.of( "testTable2", "testColumn" ) ), + BackupType.NONE, + null + ) + ), + List.of(), + BackupType.NONE, + "This is some additional information." 
+ ) ), + List.of(), + BackupType.NONE, + null + ) + + ); + } + + + // example method, delete later + public static ElementModel getDummyDoc() { + return new ElementModel( + "testNamespaceDoc", + null, + TypeName.NAMESPACE, + DataModel.DOCUMENT, + List.of( new ElementModel( + "testCollection", + null, + TypeName.COLLECTION, + DataModel.DOCUMENT, + List.of(), + List.of(), + BackupType.NONE, + "This is some additional information." + ) ), + List.of(), + BackupType.NONE, + null + ); + } + + + // example method, delete later + public static ElementModel getDummyGraph() { + return new ElementModel( + "testNamespaceGraph", + null, + TypeName.NAMESPACE, + DataModel.GRAPH, + List.of(), + List.of(), + BackupType.NONE, + null + ); + } + + + private static class JsonForceNullDeserializer extends JsonDeserializer { + + @Override + public Object deserialize( JsonParser p, DeserializationContext ctxt ) throws IOException, JacksonException { + return null; + } + + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/backup/webui/models/ManifestModel.java b/dbms/src/main/java/org/polypheny/db/backup/webui/models/ManifestModel.java new file mode 100644 index 0000000000..44a1af77e4 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/backup/webui/models/ManifestModel.java @@ -0,0 +1,72 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
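Since these WebUI models are plain Jackson-annotated classes, their JSON shape can be previewed with a standalone ObjectMapper; this is only an illustration and not how the server itself serializes responses.

import com.fasterxml.jackson.databind.ObjectMapper;

class ElementModelJsonExample {

    static void printDummyTree() throws Exception {
        // Serializes the document-model dummy defined above: a namespace containing one collection
        String json = new ObjectMapper()
                .writerWithDefaultPrettyPrinter()
                .writeValueAsString( ElementModel.getDummyDoc() );
        System.out.println( json );
    }

}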
+ */ + +package org.polypheny.db.backup.webui.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.jsontype.TypeSerializer; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.polypheny.db.type.entity.temporal.PolyTimestamp; + +/** + * @param elements 3 namespaces at max with entities and columns as children and sub-children + */ +public record ManifestModel(@JsonProperty long id, @JsonProperty List elements, @JsonProperty @JsonSerialize(using = PolyTimestampSerializer.class) PolyTimestamp timestamp) { + + + public ManifestModel( + @JsonProperty("id") long id, + @JsonProperty("elements") List elements, + @JsonProperty("timestamp") PolyTimestamp timestamp ) { + this.id = id; + this.elements = elements; + this.timestamp = timestamp; + } + + + private static class PolyTimestampSerializer extends JsonSerializer { + + @Override + public void serializeWithType( PolyTimestamp value, JsonGenerator gen, SerializerProvider serializers, TypeSerializer typeSer ) throws IOException { + serialize( value, gen, serializers ); + } + + + @Override + public void serialize( PolyTimestamp value, JsonGenerator gen, SerializerProvider serializers ) throws IOException { + gen.writeString( value.toHumanReadable() ); + } + + } + + + // dummy method to delete later + public static ManifestModel getDummy() { + List elements = new ArrayList<>(); + elements.addAll( ElementModel.getDummyRels() ); + elements.add( ElementModel.getDummyDoc() ); + elements.add( ElementModel.getDummyGraph() ); + return new ManifestModel( -1, elements, new PolyTimestamp( 1L ) ); + } + + +} diff --git a/core/src/main/java/org/polypheny/db/algebra/enumerable/impl/package-info.java b/dbms/src/main/java/org/polypheny/db/backup/webui/models/StatusModel.java similarity index 62% rename from core/src/main/java/org/polypheny/db/algebra/enumerable/impl/package-info.java rename to dbms/src/main/java/org/polypheny/db/backup/webui/models/StatusModel.java index 1d3a364f69..bb17d43542 100644 --- a/core/src/main/java/org/polypheny/db/algebra/enumerable/impl/package-info.java +++ b/dbms/src/main/java/org/polypheny/db/backup/webui/models/StatusModel.java @@ -1,6 +1,5 @@ - /* - * Copyright 2019-2024 The Polypheny Project + * Copyright 2019-2023 The Polypheny Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,9 +14,24 @@ * limitations under the License. */ -/** - * Polypheny-DB-specific classes for implementation of regular and window aggregates. 
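Over HTTP, the same models are what the routes registered in BackupCrud return; a plain java.net.http client call against the getBackups route might look like the following, where the base URL is a placeholder that depends on how the Polypheny HTTP server is configured.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class BackupApiExample {

    // baseUrl is a placeholder, e.g. "http://localhost:<port of the Polypheny HTTP server>"
    static void listBackups( String baseUrl ) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri( URI.create( baseUrl + "/backup/v1/getBackups" ) )
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send( request, HttpResponse.BodyHandlers.ofString() );
        System.out.println( response.body() );   // a JSON array of ManifestModel entries
    }

}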
- */ +package org.polypheny.db.backup.webui.models; + +import lombok.AllArgsConstructor; +import lombok.Value; + +@Value +@AllArgsConstructor +public class StatusModel { + + public Code code; + + public String message; + -package org.polypheny.db.algebra.enumerable.impl; + public enum Code { + SUCCESS, + RUNNING, + ERROR; + } +} \ No newline at end of file diff --git a/dbms/src/main/java/org/polypheny/db/gui/AboutWindow.java b/dbms/src/main/java/org/polypheny/db/gui/AboutWindow.java index 458827dec0..ac0f5a2d91 100644 --- a/dbms/src/main/java/org/polypheny/db/gui/AboutWindow.java +++ b/dbms/src/main/java/org/polypheny/db/gui/AboutWindow.java @@ -86,7 +86,7 @@ public AboutPanel() { JButton ossButton = new JButton(); final URI ossUri = new URI( "https://polypheny.org/community/acknowledgements/acknowledgements.txt" ); - ossButton.setText( "open-source software" ); + ossButton.setText( "execute-source software" ); ossButton.setBorderPainted( false ); ossButton.setOpaque( false ); ossButton.setFont( new Font( "Verdana", Font.PLAIN, 14 ) ); diff --git a/dbms/src/test/java/org/polypheny/db/backup/DependencyCircleTest.java b/dbms/src/test/java/org/polypheny/db/backup/DependencyCircleTest.java new file mode 100644 index 0000000000..ba62caff76 --- /dev/null +++ b/dbms/src/test/java/org/polypheny/db/backup/DependencyCircleTest.java @@ -0,0 +1,272 @@ +/* + * Copyright 2019-2023 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.common.collect.ImmutableMap; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import lombok.extern.slf4j.Slf4j; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.polypheny.db.TestHelper; +import org.polypheny.db.TestHelper.JdbcConnection; +import org.polypheny.db.catalog.entity.logical.LogicalEntity; +import org.polypheny.db.transaction.TransactionManager; + + +@SuppressWarnings({ "SqlDialectInspection", "SqlNoDataSourceInspection" }) +@Slf4j +@Tag("adapter") +public class DependencyCircleTest { + + static TestHelper testHelper; + BackupManager backupManager; + + + @BeforeAll + public static void start() { + // Ensures that Polypheny-DB is running + //noinspection ResultOfMethodCallIgnored + //this.testHelper = TestHelper.getInstance(); + testHelper = TestHelper.getInstance(); + //deleteOldData(); + //this.backupManager = new BackupManager( testHelper.getTransactionManager() ); + //addTestData(); + //addDependenyTestData(); + + } + + + @AfterAll + public static void stop() { + deleteDependencyTestData(); + } + + + private static void addTestData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "CREATE NAMESPACE schema1" ); + statement.executeUpdate( "CREATE TABLE schema1.table1( id INTEGER NOT NULL, PRIMARY KEY(id))" ); + statement.executeUpdate( "ALTER TABLE schema1.table1 ADD COLUMN name VARCHAR (255) NULL" ); + statement.executeUpdate( "ALTER TABLE schema1.table1 ADD UNIQUE INDEX index1 ON id ON STORE hsqldb" ); + statement.executeUpdate( "CREATE TABLE schema1.table2( id INTEGER NOT NULL, PRIMARY KEY(id) )" ); + statement.executeUpdate( "ALTER TABLE schema1.table2 ADD CONSTRAINT fk_id FOREIGN KEY (id) REFERENCES schema1.table1(id) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + statement.executeUpdate( "CREATE DOCUMENT SCHEMA private" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while adding test data", e ); + } + } + + + private static void deleteOldData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + try { + statement.executeUpdate( "ALTER TABLE schema1.table2 DROP FOREIGN KEY fk_id" ); + statement.executeUpdate( "ALTER TABLE schema1.table1 DROP INDEX index1" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + try { + statement.executeUpdate( "DROP TABLE schema1.table1" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + try { + statement.executeUpdate( "DROP TABLE schema1.table2" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + statement.executeUpdate( "DROP SCHEMA schema1" ); + statement.executeUpdate( "DROP SCHEMA private" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + } + + + @Test + public void testGatherData() { + TransactionManager transactionManager = testHelper.getTransactionManager(); + BackupManager backupManager = 
BackupManager.getINSTANCE(); + int setBatchSize = -1; + + addBasicRelTestData(); + + backupManager.startDataGathering( setBatchSize ); + BackupInformationObject bupobj = backupManager.getBackupInformationObject(); + + // go through all tables in the bupobj and add the table names to a string, which will be printed + StringBuilder sb = new StringBuilder(); + ImmutableMap> tables = bupobj.getTables(); + for ( Long key : tables.keySet() ) { + List tableList = tables.get( key ); + for ( LogicalEntity entity : tableList ) { + sb.append( entity.name ).append( "\n" ); + } + } + log.warn( sb.toString() ); + + + assertEquals( 2, tables.size(), "Wrong number of tables" ); + + deleteBasicRelTestData(); + } + + + private static void addDependenyTestData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "CREATE NAMESPACE reli" ); + statement.executeUpdate( "CREATE NAMESPACE temp" ); + statement.executeUpdate( "CREATE NAMESPACE lol" ); + statement.executeUpdate( "create table reli.t1 (t1pk integer not null, t1fk integer not null, PRIMARY KEY(t1pk))" ); + statement.executeUpdate( "create table reli.t2 (t2pk integer not null, t2fk integer not null, PRIMARY KEY(t2pk))" ); + statement.executeUpdate( "create table reli.t3 (t2pk integer not null, PRIMARY KEY(t2pk))" ); + statement.executeUpdate( "create table temp.t4 (t4pk integer not null,t4fk integer not null, PRIMARY KEY(t4pk))" ); + statement.executeUpdate( "create table temp.t5 (t5pk integer not null,t5fk integer not null, PRIMARY KEY(t5pk))" ); + statement.executeUpdate( "create table temp.t6 (t6pk integer not null, t6fk integer not null, PRIMARY KEY(t6pk))" ); + statement.executeUpdate( "alter table reli.t1 add constraint test foreign key (t1fk) references temp.t6 (t6pk) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + statement.executeUpdate( "alter table reli.t2 add constraint test foreign key (t2fk) references reli.t1 (t1pk) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + statement.executeUpdate( "alter table temp.t4 add constraint test foreign key (t4fk) references reli.t1 (t1pk) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + statement.executeUpdate( "alter table temp.t5 add constraint test foreign key (t5fk) references temp.t4 (t4pk) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + statement.executeUpdate( "alter table temp.t6 add constraint test foreign key (t6fk) references temp.t5 (t5pk) ON UPDATE RESTRICT ON DELETE RESTRICT" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while adding test data", e ); + } + } + + + private static void deleteDependencyTestData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + /* + try { + statement.executeUpdate( "ALTER TABLE schema1.table2 DROP FOREIGN KEY fk_id" ); + statement.executeUpdate( "ALTER TABLE schema1.table1 DROP INDEX index1" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + try { + statement.executeUpdate( "DROP TABLE schema1.table1" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + try { + statement.executeUpdate( "DROP TABLE schema1.table2" ); + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + + */ + statement.executeUpdate( 
"DROP SCHEMA reli" ); + statement.executeUpdate( "DROP SCHEMA temp" ); + statement.executeUpdate( "DROP SCHEMA lol" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + } + + private static void addBasicRelTestData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "CREATE NAMESPACE reli" ); + statement.executeUpdate( "CREATE TABLE reli.album(albumId INTEGER NOT NULL, albumName VARCHAR(255), nbrSongs INTEGER,PRIMARY KEY (albumId))" ); + statement.executeUpdate( "INSERT INTO reli.album VALUES (1, 'Best Album Ever!', 10), (2, 'Pretty Decent Album...', 15), (3, 'Your Ears will Bleed!', 13)" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while adding test data", e ); + } + } + + private static void deleteBasicRelTestData() { + try ( JdbcConnection jdbcConnection = new JdbcConnection( false ) ) { + Connection connection = jdbcConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "DROP TABLE reli.album" ); + statement.executeUpdate( "DROP SCHEMA reli" ); + connection.commit(); + } + } catch ( SQLException e ) { + log.error( "Exception while deleting old data", e ); + } + } + + + + @Test + public void testSimpleRelational() { + try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( false ) ) { + Connection connection = polyphenyDbConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "CREATE NAMESPACE reli2" ); + statement.executeUpdate( "CREATE TABLE reli2.t1 (t1pk INTEGER NOT NULL, t1fk INTEGER NOT NULL, PRIMARY KEY (t1pk))" ); + for ( int i = 0; i < 100; i++ ) { + statement.executeUpdate( String.format( "INSERT INTO reli2.t1 VALUES(%s,%s)", i, i * 2 ) ); + } + connection.commit(); + + } catch ( SQLException e ) { + log.error( "Exception while adding test data", e ); + } + + } catch ( SQLException e ) { + log.error( "Exception while testing getCatalogs()", e ); + } + + backupManager = BackupManager.getINSTANCE(); + backupManager.startDataGathering( -1 ); + + try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( false ) ) { + Connection connection = polyphenyDbConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "DROP TABLE reli2.t1" ); + connection.commit(); + + } catch ( SQLException e ) { + log.error( "Exception while adding test data", e ); + } + + } catch ( SQLException e ) { + log.error( "Exception while testing getCatalogs()", e ); + } + + backupManager.startInserting(); + } + +} diff --git a/dbms/src/test/java/org/polypheny/db/backup/evaluation/EvaluationTest.java b/dbms/src/test/java/org/polypheny/db/backup/evaluation/EvaluationTest.java new file mode 100644 index 0000000000..091a99d6ed --- /dev/null +++ b/dbms/src/test/java/org/polypheny/db/backup/evaluation/EvaluationTest.java @@ -0,0 +1,631 @@ +/* + * Copyright 2019-2024 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.backup.evaluation; + +import static java.lang.String.format; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import lombok.extern.slf4j.Slf4j; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.polypheny.db.TestHelper; +import org.polypheny.db.TestHelper.CypherConnection; +import org.polypheny.db.TestHelper.JdbcConnection; +import org.polypheny.db.TestHelper.MongoConnection; +import org.polypheny.db.backup.BackupManager; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.catalogs.AdapterCatalog; +import org.polypheny.db.catalog.snapshot.Snapshot; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.webui.models.results.DocResult; +import org.polypheny.db.webui.models.results.GraphResult; + + +@Slf4j +@Tag("adapter") +public class EvaluationTest { + + static TestHelper testHelper; + BackupManager backupManager; + + + @BeforeAll + public static void start() { + testHelper = TestHelper.getInstance(); + } + + /* + @Test + public void testingForEvaluation() { + TransactionManager transactionManager = testHelper.getTransactionManager(); + BackupManager backupManager = BackupManager.getINSTANCE(); + + addBasicRelTestData(); + addBasicGraphData(); + addBasicDocData(); + + backupManager.startDataGathering(); + BackupInformationObject bupobj = backupManager.getBackupInformationObject(); + + // go through all tables in the bupobj and add the table names to a string, which will be printed + StringBuilder sb = new StringBuilder(); + ImmutableMap> tables = bupobj.getTables(); + for ( Long key : tables.keySet() ) { + List tableList = tables.get( key ); + for ( LogicalEntity entity : tableList ) { + sb.append( entity.name ).append( "\n" ); + } + } + log.warn( sb.toString() ); + + // read the graph from backup file and print the PolyValue + //PolyValue deserialized = PolyValue.fromTypedJson( inLine, PolyGraph.class ); //--> deserialized is null?? 
+ //String value = deserialized.toJson(); + int lol = 2; + + + //assertEquals( 2, tables.size(), "Wrong number of tables" ); + assertEquals( 2, 2, "Wrong number of tables" ); + + deleteBasicRelTestData(); + deleteBasicGraphData(); + deleteBasicDocData(); + } + + */ + + + /** + * Start the evaluation for different batchsizes against different data volumes + */ + @Test + public void startEvaluation() { + + + startMeasure( "batchSize5", "s", "simple", "rel", 5 ); + startMeasure( "batchSize500", "s", "simple", "rel", 500 ); + startMeasure( "batchSize5000", "s", "simple", "rel", 5000 ); + + startMeasure( "batchSize5", "m", "simple", "rel", 5 ); + startMeasure( "batchSize500", "m", "simple", "rel", 500 ); + startMeasure( "batchSize5000", "m", "simple", "rel", 5000 ); + + startMeasure( "batchSize5", "l", "simple", "rel", 5 ); + startMeasure( "batchSize500", "l", "simple", "rel", 500 ); + startMeasure( "batchSize5000", "l", "simple", "rel", 5000 ); + + + + startMeasure( "batchSize5", "s", "simple", "graph", 5 );//yes + startMeasure( "batchSize500", "s", "simple", "graph", 500 );//yes + startMeasure( "batchSize1000", "s", "simple", "graph", 1000 );//yes + + + startMeasure( "batchSize5", "m", "simple", "graph", 5 );//yes + startMeasure( "batchSize500", "m", "simple", "graph", 500 );//yes + startMeasure( "batchSize1000", "m", "simple", "graph", 1000 );//yes + + + + startMeasure( "batchSize5", "l", "simple", "graph", 5 );//yes + startMeasure( "batchSize500", "l", "simple", "graph", 500 );//yes + startMeasure( "batchSize1000", "l", "simple", "graph", 1000 ); + + + startMeasure( "batchSize5", "s", "simple", "doc", 5 ); + startMeasure( "batchSize500", "s", "simple", "doc", 500 ); + startMeasure( "batchSize5000", "s", "simple", "doc", 5000 ); + + startMeasure( "batchSize5", "m", "simple", "doc", 5 ); + startMeasure( "batchSize500", "m", "simple", "doc", 500 ); + startMeasure( "batchSize5000", "m", "simple", "doc", 5000 ); + + startMeasure( "batchSize500", "l", "simple", "doc", 500 ); + startMeasure( "batchSize5000", "l", "simple", "doc", 5000 ); + startMeasure( "batchSize5", "l", "simple", "doc", 5 ); + + + //To see if everything is working + assertEquals( 2, 2, "Wrong number of tables" ); + + } + + + /** + * Test the evaluation for the relational data model + * @throws InterruptedException + */ + @Test public void relTest() throws InterruptedException { + BackupManager backupManager = BackupManager.getINSTANCE(); + //backupManager.setBatchSize( 10 ); + addBasicRelData( 6 ); + + backupManager.startDataGathering( -1 ); + + assertEquals( 2, 2, "Wrong number of tables" ); + //Thread.sleep( 5000 ); + } + + + /** + * Test the evaluation for the document data model + */ + @Test + public void docTest() { + BackupManager backupManager = BackupManager.getINSTANCE(); + addBasicDocData(5); + backupManager.startDataGathering( -1 ); + + deleteBasicDocData(); + backupManager.startInserting(); + deleteBasicDocData(); + backupManager.startInserting(); + deleteBasicDocData(); + backupManager.startInserting(); + assertEquals( 2, 2, "Wrong number of tables" ); + } + + + /** + * Test the evaluation for the graph data model + * @throws InterruptedException when something goes wrong with the threadpool in the data collection and insertion process + */ + @Test + public void graphTest() throws InterruptedException { + BackupManager backupManager = BackupManager.getINSTANCE(); + addBasicGraphData(10); + backupManager.startDataGathering( -1 ); + + deleteBasicGraphData(); + backupManager.startInserting(); + deleteBasicGraphData(); 
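// The gathered backup is now restored and dropped repeatedly (three startInserting() calls in total) to exercise re-insertion from the same in-memory gathering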
+ backupManager.startInserting(); + deleteBasicGraphData(); + backupManager.startInserting(); + assertEquals( 2, 2, "Wrong number of tables" ); + //Thread.sleep( 5000 ); + } + + + /** + * Start the measurement for the backup creation and insertion time + * + * @param parameter What backup parameter is measured (e.g. batchSize) + * @param scale The scale of the data (s|m|l) + * @param complexity The complexity of the data (simple|complex) + * @param dataModel The data model to be tested (rel|doc|graph|allModels) + * @param batchSize + * @throws IOException when something goes wrong with creating a filewriter + */ + private static void startMeasure ( String parameter, String scale, String complexity, String dataModel, int batchSize ) { + TransactionManager transactionManager = testHelper.getTransactionManager(); + BackupManager backupManager = BackupManager.getINSTANCE(); + int nbrEntries = 0; + + switch ( scale ) { + case "s": + nbrEntries = 5; + break; + case "m": + nbrEntries = 500; + break; + case "l": + if (dataModel.equals( "graph" )) { + nbrEntries = 1000; + } else { + nbrEntries = 5000; + } + break; + default: + break; + } + + //parameter: e.g. batchsize, scaling: [s|m|l], complexity: [simple|complex], type: [collection|insertion], dataModel: [rel|doc|graph|allModels] + // file title: e.g. rel_batchSize10_s_simple_collection + String fileName = ""; + + ArrayList measuredTime = new ArrayList(); + WriteToCSV writeToCSV = new WriteToCSV(); + + switch ( dataModel ) { + case "rel": + if ( complexity.equals( "simple" )) { + //collection + addBasicRelData( nbrEntries ); + + fileName = String.format( "%s_%s_%s_%s_collection", dataModel, parameter, scale, complexity); + measuredTime = measureBackupCreationTime( backupManager, complexity, dataModel, batchSize ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + deleteBasicRelData(); + measuredTime.clear(); + + //insertion + fileName = String.format( "%s_%s_%s_%s_insertion", dataModel, parameter, scale, complexity); + measuredTime = measureBackupInsertionTime( backupManager, complexity, dataModel ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + measuredTime.clear(); + } else if ( complexity.equals( "complex" ) ) { + //addComplexRelEvalData( 6 ); + } + + break; + + + case "doc": + if ( complexity.equals( "simple" )) { + //collection + addBasicDocData( nbrEntries ); + + fileName = String.format( "%s_%s_%s_%s_collection", dataModel, parameter, scale, complexity); + measuredTime = measureBackupCreationTime( backupManager, complexity, dataModel, batchSize ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + deleteBasicDocData(); + measuredTime.clear(); + + //insertion + fileName = String.format( "%s_%s_%s_%s_insertion", dataModel, parameter, scale, complexity); + measuredTime = measureBackupInsertionTime( backupManager, complexity, dataModel ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + measuredTime.clear(); + } else if ( complexity.equals( "complex" ) ) { + //addComplexRelEvalData( 6 ); + } + break; + + + case "graph": + if ( complexity.equals( "simple" )) { + //collection + addBasicGraphData( nbrEntries ); + + fileName = String.format( "%s_%s_%s_%s_collection", dataModel, parameter, scale, complexity); + measuredTime = measureBackupCreationTime( backupManager, complexity, dataModel, batchSize ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + deleteBasicGraphData(); + measuredTime.clear(); + + //insertion + fileName = String.format( "%s_%s_%s_%s_insertion", dataModel, parameter, scale, complexity); + 
measuredTime = measureBackupInsertionTime( backupManager, complexity, dataModel ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + measuredTime.clear(); + } else if ( complexity.equals( "complex" ) ) { + //addComplexRelEvalData( 6 ); + } + break; + + case "allModels": + default: + if ( dataModel.isEmpty() ) { + dataModel = "allModels"; + } + if ( complexity.equals( "simple" )) { + addBasicRelData( nbrEntries ); + addBasicDocData( nbrEntries ); + addBasicGraphData( nbrEntries ); + + fileName = String.format( "%s_%s_%s_%s_collection", dataModel, parameter, scale, complexity); + measuredTime = measureBackupCreationTime( backupManager, complexity, dataModel, batchSize ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + deleteBasicRelData(); + deleteBasicDocData(); + deleteBasicGraphData(); + measuredTime.clear(); + + //insertion + fileName = String.format( "%s_%s_%s_%s_insertion", dataModel, parameter, scale, complexity); + measuredTime = measureBackupInsertionTime( backupManager, complexity, dataModel ); + writeToCSV.writeToCSV( measuredTime, fileName ); + + measuredTime.clear(); + } else if ( complexity.equals( "complex" ) ) { + //addComplexRelEvalData( 6 ); + } + break; + } + + } + + + private static ArrayList measureBackupCreationTime ( BackupManager backupManager, String complexity, String dataModel, int batchSize ) { + ArrayList measuredTime = new ArrayList(); + long startTime; + long elapsedTime; + // perform 5 warmup runs + backupManager.startDataGathering( batchSize ); + backupManager.startDataGathering( batchSize ); + backupManager.startDataGathering( batchSize ); + backupManager.startDataGathering( batchSize ); + backupManager.startDataGathering( batchSize ); + + // perform 500 runs + for(int i=0; i< 500; i++){ + startTime = System.nanoTime(); + backupManager.startDataGathering( batchSize ); + elapsedTime = System.nanoTime() - startTime; + measuredTime.add(elapsedTime); + log.info( "Time" + dataModel+complexity + ": " + elapsedTime); + } + return measuredTime; + } + + + /** + * Measure the time it takes to insert the backup data + * @param backupManager the backup manager + * @param complexity the complexity of the data can be (simple|complex) + * @param dataModel the data model to be tested (rel|doc|graph|allModels) + * @return a list of the measured time in nanoseconds + */ + private static ArrayList measureBackupInsertionTime ( BackupManager backupManager, String complexity, String dataModel ) { + ArrayList measuredTime = new ArrayList(); + long startTime; + long elapsedTime; + //parameter: e.g. batchsize, scaling: [s|m|l], complexity: [simple|complex], type: [collection|insertion], dataModel: [rel|doc|graph|allModels] + // file title: e.g. 
rel_batchSize10_s_simple_collection + + // perform warmup runs, 5 times + for(int i=0; i< 5; i++){ + backupManager.startInserting(); + switch ( dataModel ) { + case "rel": + deleteBasicRelData(); + break; + case "doc": + deleteBasicDocData(); + break; + case "graph": + deleteBasicGraphData(); + break; + case "allModels": + default: + deleteBasicRelData(); + deleteBasicDocData(); + deleteBasicGraphData(); + break; + } + } + + // perform 500 runs + for(int i=0; i< 500; i++){ + startTime = System.nanoTime(); + backupManager.startInserting(); + elapsedTime = System.nanoTime() - startTime; + measuredTime.add(elapsedTime); + switch ( dataModel ) { + case "rel": + deleteBasicRelData(); + break; + case "doc": + deleteBasicDocData(); + break; + case "graph": + deleteBasicGraphData(); + break; + case "allModels": + default: + deleteBasicRelData(); + deleteBasicDocData(); + deleteBasicGraphData(); + break; + } + } + return measuredTime; + } + + //------------------------------------------------------------------------ + + /** + * Add simple relational evaluation data - simple structure + * @param nbrRows number of rows to add + */ + private static void addBasicRelData( int nbrRows ) { + + TestHelper.executeSql( + ( c, s ) -> s.executeUpdate( "CREATE NAMESPACE reli" ), + ( c, s ) -> s.executeUpdate( "CREATE TABLE reli.TableA(ID INTEGER NOT NULL, NAME VARCHAR(20), AGE INTEGER, PRIMARY KEY (ID))" ) + ); + + for ( int i = 0; i < nbrRows; i++ ) { + int finalI = i; + int finalI1 = i; + TestHelper.executeSql( + ( c, s ) -> s.executeUpdate( "INSERT INTO reli.TableA VALUES (" + finalI + ", 'Name" + finalI1 + "', 60)" ) + ); + } + + TestHelper.executeSql( + ( c, s ) -> c.commit() + ); + } + + + /** + * Delete simple relational evaluation data created with the addBasicRelData method (deletes the namespace) + */ + private static void deleteBasicRelData() { + TestHelper.executeSql( + ( c, s ) -> s.executeUpdate( "DROP SCHEMA IF EXISTS reli" ), + ( c, s ) -> c.commit() + ); + } + + //------------------------------------------------------------------------ + + + /** + * Add simple graph test data with a number of nodes and edges. 
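As a small aside, the nanosecond lists returned by the two measurement methods can also be summarised directly before (or instead of) being written out with WriteToCSV; the helper below is only a sketch and uses the same nanosecond-to-millisecond factor as WriteToCSV.

import java.util.ArrayList;

class TimingSummaryExample {

    static void printSummary( String label, ArrayList<Long> measuredTimeNs ) {
        // Average over the measured runs, converted from nanoseconds to milliseconds (1 ns = 1e-6 ms)
        double meanMs = measuredTimeNs.stream()
                .mapToLong( Long::longValue )
                .average()
                .orElse( 0 ) * 0.000001;
        System.out.printf( "%s: runs=%d, mean=%.3f ms%n", label, measuredTimeNs.size(), meanMs );
    }

}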
Edges are always between two nodes (each has one property) + * @param nbrNodes how many nodes you want to create + */ + private static void addBasicGraphData( int nbrNodes ) { + //nbr Edges is nbrNodes/2 + + String GRAPH_NAME = "graphtest"; + String nodesString = ""; + String edgesString = ""; + + //create graph + executeGraph( format( "CREATE DATABASE %s", GRAPH_NAME ) ); + executeGraph( format( "USE GRAPH %s", GRAPH_NAME ) ); + log.info( "started creating nodes and edges to insert" ); + + // divide nbr nodes by 3, so that we have 3 times as many nodes as edges + int nbrNodesReal = (nbrNodes/3)*2; + int nbrEdges = (nbrNodes/3) - 1; + + + for (int i = 0; i < nbrNodes; i++) { + String nString = "n" + i; + //nodesString += String.format( "(%s:Person {name: 'Ann', age: 45, depno: 13}), ", nString); + String nodeQuery = String.format( "CREATE (%s:Person {name: 'Ann'})", nString ); + executeGraph( nodeQuery, GRAPH_NAME ); + } + + /* Uncomment to also create edges + // connect two nodes with an edge + for ( int i = 0; i < nbrEdges; i++ ) { + if ( i%2 != 0 ) { + String nString = "n" + i; + String j = String.valueOf( i-1 ); + String eString = "n" + j; + //edgesString += String.format( "(%s)-[:KNOWS {since: 1994}]->(%s), ", eString, nString); + String edgeQuery = String.format( "CREATE (%s)-[:KNOWS {since: 1994}]->(%s)", eString, nString ); + executeGraph( edgeQuery, GRAPH_NAME ); + } + } + + */ + // remove the last ", " from the string + //nodesString = nodesString.substring( 0, nodesString.length() - 2 ); + //edgesString = edgesString.substring( 0, edgesString.length() - 2 ); + //String query = String.format( "CREATE %s, %s", nodesString, edgesString ); + //log.info( query ); + + //executeGraph( query, GRAPH_NAME ); + } + + + /** + * Deletes the graph namespace created with the addBasicGraphData method + */ + private static void deleteBasicGraphData() { + deleteGraphData( "graphtest" ); + } + + //------------------------------------------------------------------------ + + + /** + * Add simple document test data with a number of documents (a document has 3 fields) + * @param nbrDocs how many documents you want to create + */ + private static void addBasicDocData( int nbrDocs ) { + initDatabase( "doctest" ); //database = namespace + createCollection( "doc1", "doctest" ); + + for ( int i = 0; i < nbrDocs; i++ ) { + executeDoc( "db.doc1.insert({name: 'Max" + i + "', age: 31, depno: 13})", "doctest" ); + } + //executeDoc( "db.doc1.insert({name: 'Max', age: 31, depno: 13})", "doctest" ); + //executeDoc( "db.doc1.insert({name: 'Hans', age: 45, depno: 13})", "doctest" ); + //executeDoc( "db.doc1.insert({name: 'Ann', age: 45, depno: 13})", "doctest" ); + } + + + /** + * Delete the document namespace created with the addBasicDocData method + */ + private static void deleteBasicDocData() { + //dropCollection( "doc1", "doctest" ); + dropDatabase( "doctest" ); + + } + + //------------------------------------------------------------------------ + + + public static void initDatabase( String database ) { + MongoConnection.executeGetResponse( "use " + database ); + } + + public static void dropDatabase( String database ) { + MongoConnection.executeGetResponse( "db.dropDatabase()", database ); + } + + public static void initCollection( String collection ) { + MongoConnection.executeGetResponse( "db.createCollection(" + collection + ")" ); + } + + public static void createCollection( String collection, String database ) { + MongoConnection.executeGetResponse( String.format( "db.createCollection( %s )", collection 
), database ); + } + + public static void dropCollection( String collection ) { + MongoConnection.executeGetResponse( "db." + collection + ".drop()" ); + } + + public static DocResult executeDoc( String doc ) { + return MongoConnection.executeGetResponse( doc ); + } + + + public static DocResult executeDoc( String doc, String database ) { + return MongoConnection.executeGetResponse( doc, database ); + } + + + //------------------------------------------------------------------------ + + public static void deleteGraphData( String graph ) { + executeGraph( format( "DROP DATABASE %s IF EXISTS", graph ) ); + Snapshot snapshot = Catalog.getInstance().getSnapshot(); + AdapterCatalog adapterCatalog = Catalog.getInstance().getAdapterCatalog( 0 ).orElseThrow(); + } + + + public static GraphResult executeGraph( String query ) { + GraphResult res = CypherConnection.executeGetResponse( query ); + if ( res.getError() != null ) { + fail( res.getError() ); + } + return res; + } + + + public static GraphResult executeGraph( String query, String namespace ) { + GraphResult res = CypherConnection.executeGetResponse( query, namespace ); + if ( res.getError() != null ) { + fail( res.getError() ); + } + return res; + } + +} diff --git a/dbms/src/test/java/org/polypheny/db/backup/evaluation/WriteToCSV.java b/dbms/src/test/java/org/polypheny/db/backup/evaluation/WriteToCSV.java new file mode 100644 index 0000000000..5f56dcac72 --- /dev/null +++ b/dbms/src/test/java/org/polypheny/db/backup/evaluation/WriteToCSV.java @@ -0,0 +1,58 @@ +/* + * Copyright 2019-2024 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.backup.evaluation; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import lombok.SneakyThrows; + +public class WriteToCSV { + + /** + * Writes the measured execution times to a CSV file + * @param data measured execution times in nanoseconds + * @param typeOfData title for csv: Connection (PSQL, PolyPGI, PolyJdbc); Query Number (Q1); Nbr of Executions (E10000) + * @throws IOException if the file writer cannot be created or written to + */ + @SneakyThrows + public void writeToCSV( ArrayList<Long> data, String typeOfData ) { + System.out.println("started writing to csv file"); + + //typeOfData: Connection (PSQL, PolyPGI, PolyJdbc); Query Number (Q1); Nbr of Executions (E10000) + String filename = typeOfData + ".csv"; + String path = "C:\\Users\\esigu\\SynologyDrive\\01_Uni\\UniBasel\\23HS\\Bachelorarbeit\\Evaluation-Data\\" + filename; + File file = new File(path); + FileWriter filewriter = new FileWriter(file); + //filewriter.append("execution_time_in_nanosecs"); + //filewriter.append(','); + + for (int i = 0; i < data.size(); i++) { + String value = String.valueOf(data.get(i)*0.000001); // convert nanoseconds to milliseconds + filewriter.append(value); + filewriter.append(' '); + } + + filewriter.flush(); + filewriter.close(); + + System.out.println("finished writing to file " + filename); + + } + +} diff --git a/dbms/src/test/java/org/polypheny/db/entity/ScannableEntityTest.java b/dbms/src/test/java/org/polypheny/db/entity/ScannableEntityTest.java index 2a40d7bcfd..fdd1894911 100644 --- a/dbms/src/test/java/org/polypheny/db/entity/ScannableEntityTest.java +++ b/dbms/src/test/java/org/polypheny/db/entity/ScannableEntityTest.java @@ -65,6 +65,7 @@ public void testTens() { assertFalse( cursor.moveNext() ); } + private static Integer getFilter( boolean cooperative, List filters ) { final Iterator filterIter = filters.iterator(); while ( filterIter.hasNext() ) { diff --git a/dbms/src/test/java/org/polypheny/db/sql/view/BasicMaterializedViewTest.java b/dbms/src/test/java/org/polypheny/db/sql/view/BasicMaterializedViewTest.java index 7a29e42d08..753174d227 100644 --- a/dbms/src/test/java/org/polypheny/db/sql/view/BasicMaterializedViewTest.java +++ b/dbms/src/test/java/org/polypheny/db/sql/view/BasicMaterializedViewTest.java @@ -528,7 +528,7 @@ public void testOneStore() throws SQLException { try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) { Connection connection = polyphenyDbConnection.getConnection(); try ( Statement statement = connection.createStatement() ) { - // Deploy additional store + // Deploy additional store statement.executeUpdate( "ALTER ADAPTERS ADD \"store3\" USING 'Hsqldb' AS 'Store'" + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" ); @@ -577,10 +577,10 @@ public void testTwoStores() throws SQLException { try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) { Connection connection = polyphenyDbConnection.getConnection(); try ( Statement statement = connection.createStatement() ) { - // Deploy additional store + // Deploy additional store statement.executeUpdate( "ALTER ADAPTERS ADD \"store2\" USING 'Hsqldb' AS 'Store'" + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" ); - // Deploy additional store + // Deploy additional store statement.executeUpdate( "ALTER ADAPTERS ADD \"store3\" USING 'Hsqldb' AS 'Store'" + " WITH
'{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" ); @@ -1007,7 +1007,6 @@ public void testUpdateFreshnessUpdates() throws SQLException { statement.executeUpdate( "INSERT INTO viewTestEmpTable VALUES ( 2, 'Ernst', 'Walter', 2), ( 3, 'Elsa', 'Kuster', 3 )" ); connection.commit(); - TestHelper.checkResultSetWithDelay( 4, 3, diff --git a/plugins/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/algebra/CottontailScan.java b/plugins/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/algebra/CottontailScan.java index 3831971809..449dc82886 100644 --- a/plugins/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/algebra/CottontailScan.java +++ b/plugins/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/algebra/CottontailScan.java @@ -53,6 +53,7 @@ public String algCompareString() { + entity.getLayer() + "&"; } + @Override public AlgOptCost computeSelfCost( AlgPlanner planner, AlgMetadataQuery mq ) { return super.computeSelfCost( planner, mq ).multiplyBy( 0.1 ); diff --git a/plugins/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvScannableTable.java b/plugins/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvScannableTable.java index 78df3cb45d..0bda8391a9 100644 --- a/plugins/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvScannableTable.java +++ b/plugins/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvScannableTable.java @@ -74,4 +74,5 @@ public Enumerator enumerator() { }; } + } diff --git a/plugins/pig-language/src/test/java/org/polypheny/db/test/PigAlgBuilderTest.java b/plugins/pig-language/src/test/java/org/polypheny/db/test/PigAlgBuilderTest.java index 05fcdf4e70..47d113c4f4 100644 --- a/plugins/pig-language/src/test/java/org/polypheny/db/test/PigAlgBuilderTest.java +++ b/plugins/pig-language/src/test/java/org/polypheny/db/test/PigAlgBuilderTest.java @@ -70,7 +70,6 @@ public void testScan() { } - @Test public void testDistinct() { // Syntax: @@ -107,8 +106,6 @@ public void testFilter() { } - - @Test public void testGroup() { // Syntax: @@ -154,7 +151,6 @@ public void testGroup2() { } - @Test public void testLoad() { // Syntax: @@ -168,5 +164,4 @@ public void testLoad() { } - } diff --git a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/SqlCreateMaterializedView.java b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/SqlCreateMaterializedView.java index 4c9f9c2086..9202ce83a8 100644 --- a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/SqlCreateMaterializedView.java +++ b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/SqlCreateMaterializedView.java @@ -66,8 +66,10 @@ public class SqlCreateMaterializedView extends SqlCreate implements ExecutableSt SqlNodeList columns; @Getter SqlNode query; - @Nullable List store; - @Nullable String freshnessType; + @Nullable + List store; + @Nullable + String freshnessType; Integer freshnessTime; SqlIdentifier freshnessId; diff --git a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/altertable/SqlAlterSourceTableAddColumn.java b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/altertable/SqlAlterSourceTableAddColumn.java index 9dc5a6ff57..b3f840ec4a 100644 --- a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/altertable/SqlAlterSourceTableAddColumn.java +++ 
b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/ddl/altertable/SqlAlterSourceTableAddColumn.java @@ -51,9 +51,12 @@ public class SqlAlterSourceTableAddColumn extends SqlAlterTable { SqlIdentifier table; SqlIdentifier columnPhysical; SqlIdentifier columnLogical; - @Nullable SqlNode defaultValue; - @Nullable SqlIdentifier beforeColumnName; - @Nullable SqlIdentifier afterColumnName; + @Nullable + SqlNode defaultValue; + @Nullable + SqlIdentifier beforeColumnName; + @Nullable + SqlIdentifier afterColumnName; public SqlAlterSourceTableAddColumn( diff --git a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/fun/SqlCastFunction.java b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/fun/SqlCastFunction.java index 2a44a05803..24d809efe2 100644 --- a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/fun/SqlCastFunction.java +++ b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/fun/SqlCastFunction.java @@ -47,7 +47,7 @@ /** - * SqlCastFunction. Note that the std functions are really singleton objects, because they always get fetched via the StdOperatorTable. So you can't store any local info in the class + * SqlCastFunction. Note that the std functions are really singleton objects, because they always get fetched via the StdOperatorTable. So you can't store any local info in the class * and hence the return type data is maintained in operand[1] through the validation phase. */ public class SqlCastFunction extends SqlFunction { diff --git a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/pretty/SqlPrettyWriter.java b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/pretty/SqlPrettyWriter.java index ae59a74f21..3386a7910e 100644 --- a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/pretty/SqlPrettyWriter.java +++ b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/pretty/SqlPrettyWriter.java @@ -145,25 +145,25 @@ public class SqlPrettyWriter implements SqlWriter { @Setter private boolean selectListItemsOnSeparateLines; /** - * Sets whether to use a fix for SELECT list indentations.
-     *
-     * <ul>
-     * <li>If set to "false":
-     * <blockquote><pre>
-     *  SELECT
-     *      A as A
-     *          B as B
-     *          C as C
-     *      D
-     *  </pre></blockquote>
-     * </li>
-     * <li>If set to "true":
-     * <blockquote><pre>
-     *  SELECT
-     *      A as A
-     *      B as B
-     *      C as C
-     *      D
-     *  </pre></blockquote>
-     * </li>
-     * </ul>
+     * Sets whether to use a fix for SELECT list indentations.
+     *
+     * <ul>
+     * <li>If set to "false":
+     * <blockquote><pre>
+     * SELECT
+     *     A as A
+     *         B as B
+     *         C as C
+     *     D
+     * </pre></blockquote>
+     * </li>
+     * <li>If set to "true":
+     * <blockquote><pre>
+     * SELECT
+     *     A as A
+     *     B as B
+     *     C as C
+     *     D
+     * </pre></blockquote>
+     * </li>
+     * </ul>
*/ @Setter private boolean selectListExtraIndentFlag; diff --git a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/util/ReflectiveSqlOperatorTable.java b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/util/ReflectiveSqlOperatorTable.java index 1ccd8366a5..e3619c26c3 100644 --- a/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/util/ReflectiveSqlOperatorTable.java +++ b/plugins/sql-language/src/main/java/org/polypheny/db/sql/language/util/ReflectiveSqlOperatorTable.java @@ -120,7 +120,7 @@ public List getOperatorList() { /** - * Key for looking up operators. The name is stored in upper-case because we store case-insensitively, even in a case-sensitive session. + * Key for looking up operators. The name is stored in upper-case because we store case-insensitively, even in a case-sensitive session. */ private static class Key extends Pair { diff --git a/plugins/sql-language/src/test/java/org/polypheny/db/sql/language/parser/SqlParserTest.java b/plugins/sql-language/src/test/java/org/polypheny/db/sql/language/parser/SqlParserTest.java index 2b5d4b07da..7feecd730f 100644 --- a/plugins/sql-language/src/test/java/org/polypheny/db/sql/language/parser/SqlParserTest.java +++ b/plugins/sql-language/src/test/java/org/polypheny/db/sql/language/parser/SqlParserTest.java @@ -441,6 +441,7 @@ public class SqlParserTest extends SqlLanguageDependent { "REGR_SYY", "2011", "2014", "c", "RELATIONAL", "c", // MV: Added for parsing DDLs "RELATIVE", "92", "99", + "RELATIONAL", "c", // MV: Added for parsing DDLs "RELEASE", "99", "2003", "2011", "2014", "c", "REPEAT", "92", "99", "2003", "RESET", "c", diff --git a/webui/src/main/java/org/polypheny/db/webui/models/AssetsModel.java b/webui/src/main/java/org/polypheny/db/webui/models/AssetsModel.java index 4d4c135f8b..b80d7f1057 100644 --- a/webui/src/main/java/org/polypheny/db/webui/models/AssetsModel.java +++ b/webui/src/main/java/org/polypheny/db/webui/models/AssetsModel.java @@ -35,4 +35,4 @@ public class AssetsModel { public final String SOURCE_ICON = "fa fa-plug"; -} +} \ No newline at end of file diff --git a/webui/src/main/java/org/polypheny/db/webui/models/requests/EditCollectionRequest.java b/webui/src/main/java/org/polypheny/db/webui/models/requests/EditCollectionRequest.java index 799b2e7ad7..1011aef052 100644 --- a/webui/src/main/java/org/polypheny/db/webui/models/requests/EditCollectionRequest.java +++ b/webui/src/main/java/org/polypheny/db/webui/models/requests/EditCollectionRequest.java @@ -34,7 +34,7 @@ public class EditCollectionRequest { public String action; // truncate / drop /** - * Identifier of the store on which the collection is edited + * Identifier of the store on which the collection is edited */ public String store;
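A minimal usage sketch (not part of the changeset above) of how the evaluation helpers could be combined: it is assumed to live in the same test class as the MQL helpers so that initDatabase, createCollection, executeDoc and dropDatabase are in scope, java.util.ArrayList is assumed to be imported, and the method name measureDocInserts as well as the CSV label are hypothetical.

    // Hypothetical sketch, not part of this changeset: time a batch of document inserts
    // and persist the per-insert latencies with the WriteToCSV helper introduced above.
    private static void measureDocInserts( int nbrDocs ) {
        initDatabase( "doctest" );
        createCollection( "doc1", "doctest" );
        ArrayList<Long> executionTimes = new ArrayList<>();
        for ( int i = 0; i < nbrDocs; i++ ) {
            long start = System.nanoTime();
            executeDoc( "db.doc1.insert({name: 'Max" + i + "', age: 31, depno: 13})", "doctest" );
            executionTimes.add( System.nanoTime() - start ); // nanoseconds; WriteToCSV converts to milliseconds
        }
        dropDatabase( "doctest" );
        // label follows the Connection / Query Number / Nbr of Executions convention described in WriteToCSV
        new WriteToCSV().writeToCSV( executionTimes, "PolyMongo_Q1_E" + nbrDocs );
    }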