TableDescriptorBuilder.java
@@ -32,11 +32,11 @@
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@@ -380,6 +380,10 @@ public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor>
return this;
}

public boolean hasCoprocessor(String classNameToMatch) {
Contributor

Adding a method to a public interface. For hbase3 only, so it should be fine. Is this filling in missing functionality?

Contributor Author

This is a class, not an interface, so I think it is OK? I think for a builder class it is also OK to have some 'get' methods to know what we have set into the builder?

return desc.hasCoprocessor(classNameToMatch);
}
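
A minimal usage sketch of the new query method, assuming a hypothetical table name and coprocessor class name (neither comes from this change): the builder can now be asked what has already been set into it before registering the same coprocessor twice.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class HasCoprocessorSketch {
  public static TableDescriptorBuilder build() throws IOException {
    // Hypothetical coprocessor class name, for illustration only.
    String cpClass = "org.example.MyRegionObserver";
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"));
    // Avoid double registration by querying the builder's current state.
    if (!builder.hasCoprocessor(cpClass)) {
      builder.setCoprocessor(cpClass); // setCoprocessor(String) declares IOException
    }
    return builder;
  }
}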

public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
desc.setColumnFamily(Objects.requireNonNull(family));
return this;
@@ -411,6 +415,16 @@ public TableDescriptorBuilder removeValue(byte[] key) {
return this;
}

public TableDescriptorBuilder removeValue(BiPredicate<Bytes, Bytes> predicate) {
List<Bytes> toRemove =
desc.getValues().entrySet().stream().filter(e -> predicate.test(e.getKey(), e.getValue()))
.map(Map.Entry::getKey).collect(Collectors.toList());
for (Bytes key : toRemove) {
removeValue(key);
}
return this;
}
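
A short sketch of the predicate-based removal, assuming a hypothetical "coprocessor$" key prefix; the predicate arguments are the org.apache.hadoop.hbase.util.Bytes wrappers used by the descriptor's value map.

import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RemoveValueByPredicateSketch {
  public static TableDescriptorBuilder stripKeys(TableDescriptorBuilder builder) {
    // Drop every descriptor value whose key starts with the (hypothetical) prefix,
    // regardless of the value it maps to.
    return builder.removeValue(
        (key, value) -> Bytes.toString(key.get(), key.getOffset(), key.getLength())
            .startsWith("coprocessor$"));
  }
}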

public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
desc.removeColumnFamily(name);
return this;
@@ -531,6 +545,10 @@ public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
return this;
}

public String getValue(String key) {
return desc.getValue(key);
}
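
In the spirit of the reviewer discussion above about 'get' methods on the builder, a read-before-write sketch for the String-keyed getter; the key and value used here are illustrative only.

import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class GetValueSketch {
  public static TableDescriptorBuilder withDefault(TableDescriptorBuilder builder) {
    // Only set the (hypothetical) key when nothing has been set into the builder yet.
    if (builder.getValue("hbase.hstore.blockingStoreFiles") == null) {
      builder.setValue("hbase.hstore.blockingStoreFiles", "20");
    }
    return builder;
  }
}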

/**
* Sets replication scope for all & only the columns already in the builder. Columns added later won't
* be backfilled with replication scope.
Constraint.java
@@ -22,61 +22,53 @@
import org.apache.hadoop.hbase.client.Put;

/**
* Apply a {@link Constraint} (in traditional database terminology) to a HTable.
* Any number of {@link Constraint Constraints} can be added to the table, in
* any order.
* <p>
* Apply a {@link Constraint} (in traditional database terminology) to a Table. Any number of
* {@link Constraint Constraints} can be added to the table, in any order.
* <p/>
* A {@link Constraint} must be added to a table before the table is loaded via
* {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class[])} or
* {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor,
* org.apache.hadoop.hbase.util.Pair...)}
* (if you want to add a configuration with the {@link Constraint}). Constraints
* will be run in the order that they are added. Further, a Constraint will be
* configured before it is run (on load).
* <p>
* See {@link Constraints#enableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} and
* {@link Constraints#disableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} for
* enabling/disabling of a given {@link Constraint} after it has been added.
* <p>
* {@link Constraints#add(org.apache.hadoop.hbase.client.TableDescriptorBuilder, Class...)} or
* {@link Constraints#add(org.apache.hadoop.hbase.client.TableDescriptorBuilder, org.apache.hadoop.hbase.util.Pair...)}
* (if you want to add a configuration with the {@link Constraint}). Constraints will be run in the
* order that they are added. Further, a Constraint will be configured before it is run (on load).
* <p/>
* See
* {@link Constraints#enableConstraint(org.apache.hadoop.hbase.client.TableDescriptorBuilder, Class)}
* and
* {@link Constraints#disableConstraint(org.apache.hadoop.hbase.client.TableDescriptorBuilder, Class)}
* for enabling/disabling of a given {@link Constraint} after it has been added.
* <p/>
* If a {@link Put} is invalid, the Constraint should throw some sort of
* {@link org.apache.hadoop.hbase.constraint.ConstraintException}, indicating
* that the {@link Put} has failed. When
* this exception is thrown, not further retries of the {@link Put} are
* attempted nor are any other {@link Constraint Constraints} attempted (the
* {@link Put} is clearly not valid). Therefore, there are performance
* implications in the order in which {@link BaseConstraint Constraints} are
* specified.
* <p>
* {@link org.apache.hadoop.hbase.constraint.ConstraintException}, indicating that the {@link Put}
* has failed. When this exception is thrown, no further retries of the {@link Put} are attempted
* nor are any other {@link Constraint Constraints} attempted (the {@link Put} is clearly not
* valid). Therefore, there are performance implications in the order in which {@link BaseConstraint
* Constraints} are specified.
* <p/>
* If a {@link Constraint} fails to fail the {@link Put} via a
* {@link org.apache.hadoop.hbase.constraint.ConstraintException}, but instead
* throws a {@link RuntimeException},
* the entire constraint processing mechanism ({@link ConstraintProcessor}) will
* be unloaded from the table. This ensures that the region server is still
* functional, but no more {@link Put Puts} will be checked via
* {@link Constraint Constraints}.
* <p>
* Further, {@link Constraint Constraints} should probably not be used to
* enforce cross-table references as it will cause tremendous write slowdowns,
* but it is possible.
* <p>
* {@link org.apache.hadoop.hbase.constraint.ConstraintException}, but instead throws a
* {@link RuntimeException}, the entire constraint processing mechanism
* ({@link ConstraintProcessor}) will be unloaded from the table. This ensures that the region
* server is still functional, but no more {@link Put Puts} will be checked via {@link Constraint
* Constraints}.
* <p/>
* Further, {@link Constraint Constraints} should probably not be used to enforce cross-table
* references as it will cause tremendous write slowdowns, but it is possible.
* <p/>
* NOTE: Implementing classes must have a nullary (no-args) constructor
*
* @see BaseConstraint
* @see Constraints
*/
@InterfaceAudience.Private
public interface Constraint extends Configurable {

/**
* Check a {@link Put} to ensure it is valid for the table. If the {@link Put}
* is valid, then just return from the method. Otherwise, throw an
* {@link Exception} specifying what happened. This {@link Exception} is
* propagated back to the client so you can see what caused the {@link Put} to
* fail.
* Check a {@link Put} to ensure it is valid for the table. If the {@link Put} is valid, then just
* return from the method. Otherwise, throw an {@link Exception} specifying what happened. This
* {@link Exception} is propagated back to the client so you can see what caused the {@link Put}
* to fail.
* @param p {@link Put} to check
* @throws org.apache.hadoop.hbase.constraint.ConstraintException when the
* {@link Put} does not match the
* constraint.
* @throws org.apache.hadoop.hbase.constraint.ConstraintException when the {@link Put} does not
* match the constraint.
*/
void check(Put p) throws ConstraintException;
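
A minimal sketch of a Constraint implementation and its registration through the builder-based Constraints.add referenced in the javadoc above; the table name and the empty-Put rule are illustrative assumptions, and the throws IOException on the registration helper assumes the new add(...) keeps the checked exception of the old HTableDescriptor-based signature.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.constraint.Constraints;

// Rejects any Put that carries no cells; extending BaseConstraint means only check() is needed.
public class NonEmptyPutConstraint extends BaseConstraint {
  @Override
  public void check(Put p) throws ConstraintException {
    if (p.isEmpty()) {
      throw new ConstraintException("Put must carry at least one cell: " + p);
    }
  }

  // Registration sketch: constraints must be added before the table is loaded.
  public static TableDescriptorBuilder register() throws IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"));
    Constraints.add(builder, NonEmptyPutConstraint.class);
    return builder;
  }
}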
