diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 10a38f63c3a1..f55fc477497c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1601,6 +1601,16 @@ public enum OperationStatusCode {
    */
   public static final int BATCH_ROWS_THRESHOLD_DEFAULT = 5000;
 
+  /**
+   * List of column families that cannot be deleted from the hbase:meta table.
+   * They are critical to cluster operation. This is a bit of an odd place to
+   * keep this list but then this is the tooling that does add/remove. Keeping
+   * it local!
+   */
+  public static final List<byte[]> UNDELETABLE_META_COLUMNFAMILIES = Collections.unmodifiableList(
+    Arrays.asList(HConstants.CATALOG_FAMILY, HConstants.TABLE_FAMILY,
+      HConstants.REPLICATION_BARRIER_FAMILY));
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 247dd9c202f4..d7158fc68ffc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.function.Supplier;
@@ -59,15 +57,6 @@ public class ModifyTableProcedure
   private TableDescriptor modifiedTableDescriptor;
   private boolean deleteColumnFamilyInModify;
   private boolean shouldCheckDescriptor;
-  /**
-   * List of column families that cannot be deleted from the hbase:meta table.
-   * They are critical to cluster operation. This is a bit of an odd place to
-   * keep this list but then this is the tooling that does add/remove. Keeping
-   * it local!
-   */
-  private static final List<byte[]> UNDELETABLE_META_COLUMNFAMILIES =
-    Collections.unmodifiableList(Arrays.asList(
-      HConstants.CATALOG_FAMILY, HConstants.TABLE_FAMILY, HConstants.REPLICATION_BARRIER_FAMILY));
 
   public ModifyTableProcedure() {
     super();
@@ -102,7 +91,7 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H
       // If we are modifying the hbase:meta table, make sure we are not deleting critical
       // column families else we'll damage the cluster.
       Set<byte[]> cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
-      for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
+      for (byte[] family : HConstants.UNDELETABLE_META_COLUMNFAMILIES) {
         if (!cfs.contains(family)) {
           throw new HBaseIOException("Delete of hbase:meta column family "
             + Bytes.toString(family));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 4fb231234e64..460a8c1125bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -94,6 +95,10 @@ public class FSTableDescriptors implements TableDescriptors {
   // TODO.
   private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
 
+  // Default meta table descriptor, will be used by RegionServer during rolling upgrade until
+  // HMaster writes the latest 2.x meta table descriptor
+  private TableDescriptor defaultMetaTableDesc = null;
+
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given conf and the
    * filesystem where that root dir lives. This instance can do write operations (is not read only).
@@ -112,6 +117,14 @@ public FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
+    // Create default in-memory meta table descriptor for RegionServer
+    if (this.fsreadonly) {
+      try {
+        defaultMetaTableDesc = createMetaTableDescriptorBuilder(fs.getConf()).build();
+      } catch (IOException ioe) {
+        LOG.warn("Exception occurred while creating meta table descriptor", ioe);
+      }
+    }
   }
 
   public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOException {
@@ -123,8 +136,10 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c
       FileSystem fs, Path rootdir) throws IOException {
     // see if we already have meta descriptor on fs. Write one if not.
     try {
-      return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
-    } catch (TableInfoMissingException e) {
+      TableDescriptor td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+      validateMetaTableDescriptor(td);
+      return td;
+    } catch (TableInfoMissingException | NoSuchColumnFamilyException e) {
       TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
       TableDescriptor td = builder.build();
       LOG.info("Creating new hbase:meta table descriptor {}", td);
@@ -139,6 +154,21 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c
     }
   }
 
+  /**
+   * Validate meta table descriptor whether default column families exist or not
+   */
+  private static void validateMetaTableDescriptor(TableDescriptor td)
+    throws NoSuchColumnFamilyException, TableInfoMissingException {
+    if (td == null) {
+      throw new TableInfoMissingException("Meta .tableinfo not found");
+    }
+    for (byte[] cf : HConstants.UNDELETABLE_META_COLUMNFAMILIES) {
+      if (!td.hasColumnFamily(cf)) {
+        throw new NoSuchColumnFamilyException("Column family " + Bytes.toString(cf) + " not found");
+      }
+    }
+  }
+
   private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
     throws IOException {
     // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
@@ -223,6 +253,21 @@ public TableDescriptor get(TableName tableName) {
       } catch (NullPointerException | IOException ioe) {
         LOG.debug("Exception during readTableDecriptor. Current table name = " + tableName, ioe);
       }
+
+      // Validate whether meta table descriptor is in HBase 2.x format
+      if (TableName.isMetaTableName(tableName) && defaultMetaTableDesc != null) {
+        try {
+          validateMetaTableDescriptor(tdmt);
+          // FS has a proper meta table descriptor, we don't need to validate it again.
+          // Reset defaultMetaTableDesc
+          defaultMetaTableDesc = null;
+        } catch (TableInfoMissingException | NoSuchColumnFamilyException e) {
+          // Meta is still in old format, return the default meta table descriptor until we have meta
+          // descriptor in HBase 2.x format
+          return defaultMetaTableDesc;
+        }
+      }
+
       // last HTD written wins
       if (usecache && tdmt != null) {
         this.cache.put(tableName, tdmt);