Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions core/trino-main/src/main/java/io/trino/metadata/Metadata.java
Original file line number Diff line number Diff line change
Expand Up @@ -127,15 +127,15 @@ Optional<TableExecuteHandle> getTableHandleForExecute(
* required by semantic analyzer to analyze the query.
*
* @throws RuntimeException if table handle is no longer valid
* @see {@link #getTableMetadata(Session, TableHandle)}
* @see #getTableMetadata(Session, TableHandle)
*/
TableSchema getTableSchema(Session session, TableHandle tableHandle);

/**
* Return the metadata for the specified table handle.
*
* @throws RuntimeException if table handle is no longer valid
* @see {@link #getTableSchema(Session, TableHandle)} which is less expensive.
* @see #getTableSchema(Session, TableHandle) a different method which is less expensive.
*/
TableMetadata getTableMetadata(Session session, TableHandle tableHandle);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -411,7 +411,6 @@ private class BucketDataNode
* or one with an existing count and simply needs the count updated
* <p>
* processEntry handles these cases
* @param valueAndGroupHash
*/
private BucketDataNode(int bucketId, ValueNode valueNode, long valueHash, long valueAndGroupHash, int nodePointerToUse, boolean isEmpty)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,6 @@ public ChoicesSpecializedSqlScalarFunction(
* The first choice is the default choice, which is the one used for legacy access methods.
* The default choice must be usable under any context. (e.g. it must not use BLOCK_POSITION convention.)
*
* @param boundSignature
* @param choices the list of choices, ordered from generic to specific
*/
public ChoicesSpecializedSqlScalarFunction(BoundSignature boundSignature, List<ScalarImplementationChoice> choices)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ public interface Unnester
* @param startPosition The start input position of this batch.
* @param batchSize The number of input rows to be processed in this batch.
* @param outputRowCount The total output row count for this batch after the unnest is done.
* @return
*/
Block[] buildOutputBlocks(int[] outputEntriesPerPosition, int startPosition, int batchSize, int outputRowCount);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@

import com.google.common.util.concurrent.ListenableFuture;
import io.airlift.units.DataSize;
import io.trino.ExceededSpillLimitException;
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

crosses commit boundary?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

thanks

import org.weakref.jmx.Managed;

import javax.annotation.concurrent.GuardedBy;
Expand Down Expand Up @@ -45,8 +44,6 @@ public SpillSpaceTracker(DataSize maxSize)

/**
* Reserves the given number of bytes to spill. If more than the maximum, throws an exception.
*
* @throws ExceededSpillLimitException
*/
public synchronized ListenableFuture<Void> reserve(long bytes)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ public static PlanMatchPattern any(PlanMatchPattern... sources)
* anyTree(tableScan("nation")) - will match to any plan which all leafs contain
* any node containing table scan from nation table.
*
* @note anyTree does not match zero nodes. E.g. output(anyTree(tableScan)) will NOT match TableScan node followed by OutputNode.
* Note: anyTree does not match zero nodes. E.g. output(anyTree(tableScan)) will NOT match TableScan node followed by OutputNode.
*/
public static PlanMatchPattern anyTree(PlanMatchPattern... sources)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ default void executeTableExecute(ConnectorSession session, ConnectorTableExecute

/**
* Returns the system table for the specified table name, if one exists.
* The system tables handled via {@link #getSystemTable} differ form those returned by {@link Connector#getSystemTables()}.
The system tables handled via this method differ from those returned by {@link Connector#getSystemTables()}.
* The former mechanism allows dynamic resolution of system tables, while the latter is
based on a static list of system tables built during startup.
*/
Expand Down Expand Up @@ -1106,7 +1106,7 @@ default Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(
* c = CH2
* </pre>
* <p>
* The optimizer would call {@link #applyProjection} with the following arguments:
* The optimizer would call this method with the following arguments:
*
* <pre>
* handle = TH0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,6 @@ public T get(long index)
* Sets the element of this big array at specified index.
*
* @param index a position in this big array.
* @return true if the previous value was null
*/
public void set(long index, T value)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
import io.trino.spi.TrinoException;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
Expand Down Expand Up @@ -122,8 +121,6 @@ public void shutdown()
* @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
* @param pollingDuration Duration for polling the cardinality completion service
* @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
* @throws TableNotFoundException If the metrics table does not exist
* @throws ExecutionException If another error occurs; I really don't even know anymore.
*/
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table, Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs, long earlyReturnThreshold, Duration pollingDuration)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,6 @@ default void updatePartitionStatistics(Table table, String partitionName, Functi
void revokeTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilege> privileges, boolean grantOption);

/**
* @param tableOwner
* @param principal when empty, all table privileges are returned
*/
Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<String> tableOwner, Optional<HivePrincipal> principal);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,6 @@ public static Optional<List<String>> partitionKeyFilterToStringList(List<String>

/**
* @param domain - domain expression for the column. null => TupleDomain.all()
* @param assumeCanonicalPartitionKeys
* @param partitionWildcardString wildcard
* @return string for scalar values
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,6 @@ public interface ThriftMetastore
void revokeTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilege> privileges, boolean grantOption);

/**
* @param tableOwner
* @param principal when empty, all table privileges are returned
*/
Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<String> tableOwner, Optional<HivePrincipal> principal);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1359,8 +1359,6 @@ private void doGetPartitionsFilterTest(

/**
* @param filterList should be same sized list as expectedValuesList
* @param expectedValuesList
* @throws Exception
*/
private void doGetPartitionsFilterTest(
List<ColumnMetadata> columnMetadata,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
import static org.apache.kafka.common.security.auth.SecurityProtocol.SSL;

/**
* {@KafkaSslConfig} manages Kafka SSL authentication and encryption between clients and brokers.
* Manages Kafka SSL authentication and encryption between clients and brokers.
*/
public class KafkaSslConfig
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,6 @@ public static KuduOperationApplier fromKuduClientSession(KuduClientSession kuduC
* Not thread safe
 * Applies an operation without waiting for it to be flushed; operations are flushed in the background
* @param operation kudu operation
* @throws KuduException
*/
public void applyOperationAsync(Operation operation)
throws KuduException
Expand Down